/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>

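/*
 * On-disk physical block numbers are 48 bits wide: the low 32 bits live
 * in ee_start/ei_leaf and the high 16 bits in ee_start_hi/ei_leaf_hi.
 * The helpers below combine and split them.  The two-step "<< 31) << 1"
 * is equivalent to "<< 32" written as two shifts, presumably so the
 * expression stays well defined should ext4_fsblk_t ever be a 32-bit
 * type (shifting a value by its full width is undefined in C).
 * Example: ee_start_hi = 0x0001 and ee_start = 0x00000010 combine to
 * physical block 0x100000010 (with a 64-bit ext4_fsblk_t).
 */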

/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	/* transaction can't be extended: restart it and report failures */
	err = ext4_journal_restart(handle, needed);
	if (err)
		return ERR_PTR(err);

	return handle;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_fsblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}

static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}

static int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode);
		else
			max = ext4_ext_space_root_idx(inode);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode);
		else
			max = ext4_ext_space_block_idx(inode);
	}

	return max;
}
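
/*
 * For reference: with 4 KiB blocks the limits computed above work out
 * to (4096 - 12) / 12 = 340 extents or indexes per tree block, and
 * (60 - 12) / 12 = 4 entries in the in-inode root (i_data is 60 bytes;
 * the header and each entry are 12 bytes each).  AGGRESSIVE_TEST
 * shrinks these limits so deep trees can be exercised on small files.
 */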

static int __ext4_ext_check_header(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check_header(inode, eh, depth)	\
	__ext4_ext_check_header(__FUNCTION__, inode, eh, depth)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %d(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
					ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
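
/*
 * Both binary searches here keep the invariant that everything left of
 * l is <= block and everything right of r is > block; when the loop
 * ends, l - 1 is therefore the rightmost entry whose start is <= block,
 * i.e. the index (or extent) that may cover it.  The search starts at
 * the second entry so that l - 1 can never underrun the array.  The
 * same scheme is used by ext4_ext_binsearch() below.
 */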

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (ext4_ext_check_header(inode, eh, depth))
		return ERR_PTR(-EIO);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
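
/*
 * The path returned by ext4_ext_find_extent() has depth + 1 live
 * entries (one extra slot is allocated for a possible depth increase):
 * path[0] describes the root in the inode body and path[depth] the
 * leaf, each holding the buffer_head and the chosen index/extent at
 * that level.  Callers own the result: they must ext4_ext_drop_refs()
 * and kfree() it when done (or on error), and may pass it back in to
 * be refilled after the tree has changed.
 */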

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
				newblock, (unsigned long) le32_to_cpu(border),
				oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}
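
/*
 * Note that ext4_ext_split() allocates every new block up front and
 * records it in ablocks[]; if any later step fails, the cleanup path
 * frees them all, so a failed split never leaks blocks.  The new index
 * is only linked into the parent as the very last step, so on error
 * the reachable tree stays consistent, as the comment above notes.
 */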

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
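
/*
 * The root always stays in the inode body: growing in depth copies the
 * old root into a freshly allocated block and turns the in-inode root
 * into a one-entry index pointing at that copy, so the tree gets deeper
 * without ever relocating the inode's own header.
 */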

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
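
/*
 * Reminder on ee_len encoding: the top bit of the 16-bit ee_len marks
 * an uninitialized (preallocated) extent, so an initialized extent can
 * hold up to EXT_INIT_MAX_LEN (32768) blocks while an uninitialized one
 * is capped at EXT_UNINIT_MAX_LEN (32767); ext4_ext_get_actual_len()
 * strips the flag bit when computing the real length.
 */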

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	unsigned long b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
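
/*
 * Worked example: if newext starts at logical block 100 with length 50
 * and the next allocated extent starts at block 120, then b1 = 100,
 * b2 = 120 and b1 + len1 > b2, so ee_len is trimmed to 20 blocks and 1
 * is returned; the caller can then allocate only the non-overlapping
 * part.
 */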

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header * eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	nearex->ee_start = newext->ee_start;
	nearex->ee_start_hi = newext->ee_start_hi;
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
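
/*
 * The callback contract for ext4_ext_walk_space(): func is invoked once
 * per extent or gap (cbex.ec_type tells which), and its return value
 * steers the walk: a negative value aborts with that error, EXT_REPEAT
 * revisits the same range, EXT_BREAK stops cleanly, and anything else
 * continues after cbex.  Since the callback may modify the tree, the
 * path is reallocated whenever the tree depth changes under us.
 */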

static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
				(unsigned long) cex->ec_len,
				cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}

/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage()
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * Let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify old root, credits for
	 * new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
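
/*
 * Plugging the numbers in: with the assumed worst-case depth of 5 the
 * pessimistic estimate is 2 (data blocks) + 1 (old root) + 5*2
 * (bitmap plus group descriptor per level) + 5*2 (two tree blocks per
 * level) + 1 (superblock) = 24 credits.  The common case, a leaf with
 * a free slot, costs just 1.
 */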

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;
		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %lu blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	if (ext4_ext_is_uninitialized(ex))
		uninitialized = 1;
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
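
/*
 * ext4_ext_remove_space() below walks the tree iteratively, using i as
 * the current depth and path[] as an explicit DFS stack: it descends
 * through the rightmost indexes, removes leaves via ext4_ext_rm_leaf(),
 * and frees index blocks on the way back up once they become empty.
 * path[i].p_block caches the entry count seen at each level so
 * ext4_ext_more_to_rm() can tell whether a level still has work left.
 */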
1907
1908int ext4_ext_remove_space(struct inode *inode, unsigned long start)
1909{
1910	struct super_block *sb = inode->i_sb;
1911	int depth = ext_depth(inode);
1912	struct ext4_ext_path *path;
1913	handle_t *handle;
1914	int i = 0, err = 0;
1915
1916	ext_debug("truncate since %lu\n", start);
1917
1918	/* probably first extent we're gonna free will be last in block */
1919	handle = ext4_journal_start(inode, depth + 1);
1920	if (IS_ERR(handle))
1921		return PTR_ERR(handle);
1922
1923	ext4_ext_invalidate_cache(inode);
1924
1925	/*
1926	 * We start scanning from right side, freeing all the blocks
1927	 * after i_size and walking into the tree depth-wise.
1928	 */
1929	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1930	if (path == NULL) {
1931		ext4_journal_stop(handle);
1932		return -ENOMEM;
1933	}
1934	path[0].p_hdr = ext_inode_hdr(inode);
1935	if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
1936		err = -EIO;
1937		goto out;
1938	}
1939	path[0].p_depth = depth;
1940
1941	while (i >= 0 && err == 0) {
1942		if (i == depth) {
1943			/* this is leaf block */
1944			err = ext4_ext_rm_leaf(handle, inode, path, start);
1945			/* root level has p_bh == NULL, brelse() eats this */
1946			brelse(path[i].p_bh);
1947			path[i].p_bh = NULL;
1948			i--;
1949			continue;
1950		}
1951
1952		/* this is index block */
1953		if (!path[i].p_hdr) {
1954			ext_debug("initialize header\n");
1955			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
1956		}
1957
1958		if (!path[i].p_idx) {
1959			/* this level hasn't been touched yet */
1960			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1961			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
1962			ext_debug("init index ptr: hdr 0x%p, num %d\n",
1963				  path[i].p_hdr,
1964				  le16_to_cpu(path[i].p_hdr->eh_entries));
1965		} else {
1966			/* we were already here, see at next index */
1967			path[i].p_idx--;
1968		}
1969
1970		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
1971				i, EXT_FIRST_INDEX(path[i].p_hdr),
1972				path[i].p_idx);
1973		if (ext4_ext_more_to_rm(path + i)) {
1974			struct buffer_head *bh;
1975			/* go to the next level */
1976			ext_debug("move to level %d (block %llu)\n",
1977				  i + 1, idx_pblock(path[i].p_idx));
1978			memset(path + i + 1, 0, sizeof(*path));
1979			if (WARN_ON(i + 1 > depth)) {
1980				err = -EIO;
1981				break;
1982			}
1983			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
1984			if (!bh) {
1985				/* should we reset i_size? */
1986				err = -EIO;
1987				break;
1988			}
1989			if (ext4_ext_check_header(inode, ext_block_hdr(bh),
1990						  depth - i - 1)) {
1991				brelse(bh);
1992				err = -EIO;
1993				break;
1994			}
1995			path[i + 1].p_bh = bh;
1996			/* save actual number of indexes since this
1997			 * number is changed at the next iteration */
1998			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
1999			i++;
2000		} else {
2001			/* we finished processing this index, go up */
2002			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2003				/* index is empty, remove it;
2004				 * the handle was already prepared while
2005				 * truncating the leaf below */
2006				err = ext4_ext_rm_idx(handle, inode, path + i);
2007			}
2008			/* root level has p_bh == NULL; brelse(NULL) is a no-op */
2009			brelse(path[i].p_bh);
2010			path[i].p_bh = NULL;
2011			i--;
2012			ext_debug("return to level %d\n", i);
2013		}
2014	}
2015
2016	/* TODO: flexible tree reduction should be here */
2017	if (path->p_hdr->eh_entries == 0) {
2018		/*
2019		 * the truncate freed the entire tree,
2020		 * so we need to correct eh_depth
2021		 */
2022		err = ext4_ext_get_access(handle, inode, path);
2023		if (err == 0) {
2024			ext_inode_hdr(inode)->eh_depth = 0;
2025			ext_inode_hdr(inode)->eh_max =
2026				cpu_to_le16(ext4_ext_space_root(inode));
2027			err = ext4_ext_dirty(handle, inode, path);
2028		}
2029	}
2030out:
2031	ext4_ext_tree_changed(inode);
2032	ext4_ext_drop_refs(path);
2033	kfree(path);
2034	ext4_journal_stop(handle);
2035
2036	return err;
2037}
2038
2039/*
2040 * called at mount time
2041 */
2042void ext4_ext_init(struct super_block *sb)
2043{
2044	/*
2045	 * possible initialization would be here
2046	 */
2047
2048	if (test_opt(sb, EXTENTS)) {
2049		printk(KERN_INFO "EXT4-fs: file extents enabled");
2050#ifdef AGGRESSIVE_TEST
2051		printk(", aggressive tests");
2052#endif
2053#ifdef CHECK_BINSEARCH
2054		printk(", check binsearch");
2055#endif
2056#ifdef EXTENTS_STATS
2057		printk(", stats");
2058#endif
2059		printk("\n");
2060#ifdef EXTENTS_STATS
2061		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2062		EXT4_SB(sb)->s_ext_min = 1 << 30;
2063		EXT4_SB(sb)->s_ext_max = 0;
2064#endif
2065	}
2066}
2067
2068/*
2069 * called at umount time
2070 */
2071void ext4_ext_release(struct super_block *sb)
2072{
2073	if (!test_opt(sb, EXTENTS))
2074		return;
2075
2076#ifdef EXTENTS_STATS
2077	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2078		struct ext4_sb_info *sbi = EXT4_SB(sb);
2079		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2080			sbi->s_ext_blocks, sbi->s_ext_extents,
2081			sbi->s_ext_blocks / sbi->s_ext_extents);
2082		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2083			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2084	}
2085#endif
2086}
2087
2088/*
2089 * This function is called by ext4_ext_get_blocks() if someone tries to write
2090 * to an uninitialized extent. It may result in splitting the uninitialized
2091 * extent into multiple extents (up to three: one initialized and two
2092 * uninitialized).
2093 * There are three possibilities:
2094 *   a> There is no split required: Entire extent should be initialized
2095 *   b> Split into two extents: the write is at either end of the extent
2096 *   c> Split into three extents: someone is writing in the middle of the extent
2097 */
2098int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
2099					struct ext4_ext_path *path,
2100					ext4_fsblk_t iblock,
2101					unsigned long max_blocks)
2102{
2103	struct ext4_extent *ex, newex;
2104	struct ext4_extent *ex1 = NULL;
2105	struct ext4_extent *ex2 = NULL;
2106	struct ext4_extent *ex3 = NULL;
2107	struct ext4_extent_header *eh;
2108	unsigned int allocated, ee_block, ee_len, depth;
2109	ext4_fsblk_t newblock;
2110	int err = 0;
2111	int ret = 0;
2112
2113	depth = ext_depth(inode);
2114	eh = path[depth].p_hdr;
2115	ex = path[depth].p_ext;
2116	ee_block = le32_to_cpu(ex->ee_block);
2117	ee_len = ext4_ext_get_actual_len(ex);
2118	allocated = ee_len - (iblock - ee_block);
2119	newblock = iblock - ee_block + ext_pblock(ex);
2120	ex2 = ex;
2121
2122	/* ex1: ee_block to iblock - 1 : uninitialized */
2123	if (iblock > ee_block) {
2124		ex1 = ex;
2125		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2126		ext4_ext_mark_uninitialized(ex1);
2127		ex2 = &newex;
2128	}
2129	/*
2130	 * for sanity, update the length of the ex2 extent before
2131	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2132	 * overlap of blocks.
2133	 */
2134	if (!ex1 && allocated > max_blocks)
2135		ex2->ee_len = cpu_to_le16(max_blocks);
2136	/* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
2137	if (allocated > max_blocks) {
2138		unsigned int newdepth;
2139		ex3 = &newex;
2140		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2141		ext4_ext_store_pblock(ex3, newblock + max_blocks);
2142		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2143		ext4_ext_mark_uninitialized(ex3);
2144		err = ext4_ext_insert_extent(handle, inode, path, ex3);
2145		if (err)
2146			goto out;
2147		/*
2148		 * The depth, and hence eh & ex might change
2149		 * as part of the insert above.
2150		 */
2151		newdepth = ext_depth(inode);
2152		if (newdepth != depth) {
2153			depth = newdepth;
2154			ext4_ext_drop_refs(path);
2155			path = ext4_ext_find_extent(inode, iblock, path);
2156			if (IS_ERR(path)) {
2157				err = PTR_ERR(path);
2158				goto out;
2159			}
2160			eh = path[depth].p_hdr;
2161			ex = path[depth].p_ext;
2162			if (ex2 != &newex)
2163				ex2 = ex;
2164		}
2165		allocated = max_blocks;
2166	}
2167	/*
2168	 * If there was a change of depth as part of the
2169	 * insertion of ex3 above, we need to update the length
2170	 * of the ex1 extent again here
2171	 */
2172	if (ex1 && ex1 != ex) {
2173		ex1 = ex;
2174		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2175		ext4_ext_mark_uninitialized(ex1);
2176		ex2 = &newex;
2177	}
2178	/* ex2: iblock to iblock + max_blocks - 1 : initialized */
2179	ex2->ee_block = cpu_to_le32(iblock);
2180	/* ext4_ext_store_pblock() sets both ee_start and ee_start_hi */
2181	ext4_ext_store_pblock(ex2, newblock);
2182	ex2->ee_len = cpu_to_le16(allocated);
2183	if (ex2 != ex)
2184		goto insert;
2185	err = ext4_ext_get_access(handle, inode, path + depth);
2186	if (err)
2187		goto out;
2188	/*
2189	 * New (initialized) extent starts from the first block
2190	 * in the current extent. i.e., ex2 == ex
2191	 * We have to see if it can be merged with the extent
2192	 * on the left.
2193	 */
2194	if (ex2 > EXT_FIRST_EXTENT(eh)) {
2195		/*
2196		 * To merge left, pass "ex2 - 1" to try_to_merge(),
2197		 * since it merges towards right _only_.
2198		 */
2199		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2200		if (ret) {
2201			err = ext4_ext_correct_indexes(handle, inode, path);
2202			if (err)
2203				goto out;
2204			depth = ext_depth(inode);
2205			ex2--;
2206		}
2207	}
2208	/*
2209	 * Try to merge towards the right. This is needed only
2210	 * when the whole extent is being written to,
2211	 * i.e. ex2 == ex and ex3 == NULL.
2212	 */
2213	if (!ex3) {
2214		ret = ext4_ext_try_to_merge(inode, path, ex2);
2215		if (ret) {
2216			err = ext4_ext_correct_indexes(handle, inode, path);
2217			if (err)
2218				goto out;
2219		}
2220	}
2221	/* Mark modified extent as dirty */
2222	err = ext4_ext_dirty(handle, inode, path + depth);
2223	goto out;
2224insert:
2225	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2226out:
2227	return err ? err : allocated;
2228}
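
/*
 * Illustrative sketch, not part of the driver: the pure arithmetic of
 * the split performed above.  Given an uninitialized extent
 * [ee_block, ee_block + ee_len) and a write of max_blocks starting at
 * iblock inside it, compute the optional uninitialized head (ex1),
 * the initialized middle (ex2) and the optional uninitialized tail
 * (ex3).  The struct and function names are made up.
 */
#if 0
struct toy_split {
	unsigned int head_len;	/* ex1: [ee_block, iblock) */
	unsigned int mid_len;	/* ex2: [iblock, iblock + mid_len) */
	unsigned int tail_len;	/* ex3: [iblock + mid_len, ee_block + ee_len) */
};

static struct toy_split toy_split_extent(unsigned int ee_block,
					 unsigned int ee_len,
					 unsigned int iblock,
					 unsigned int max_blocks)
{
	/* blocks from iblock to the end of the extent, as in the code above */
	unsigned int allocated = ee_len - (iblock - ee_block);
	struct toy_split s;

	s.head_len = iblock - ee_block;		/* 0 => no ex1 (cases a/b) */
	s.mid_len = allocated > max_blocks ? max_blocks : allocated;
	s.tail_len = allocated - s.mid_len;	/* 0 => no ex3 (cases a/b) */
	return s;				/* head + mid + tail == ee_len */
}
#endif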
2229
2230int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2231			ext4_fsblk_t iblock,
2232			unsigned long max_blocks, struct buffer_head *bh_result,
2233			int create, int extend_disksize)
2234{
2235	struct ext4_ext_path *path = NULL;
2236	struct ext4_extent_header *eh;
2237	struct ext4_extent newex, *ex;
2238	ext4_fsblk_t goal, newblock;
2239	int err = 0, depth, ret;
2240	unsigned long allocated = 0;
2241
2242	__clear_bit(BH_New, &bh_result->b_state);
2243	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
2244			max_blocks, (unsigned) inode->i_ino);
2245	mutex_lock(&EXT4_I(inode)->truncate_mutex);
2246
2247	/* check in cache */
2248	goal = ext4_ext_in_cache(inode, iblock, &newex);
2249	if (goal) {
2250		if (goal == EXT4_EXT_CACHE_GAP) {
2251			if (!create) {
2252				/*
2253				 * block isn't allocated yet and
2254				 * user doesn't want to allocate it
2255				 */
2256				goto out2;
2257			}
2258			/* we should allocate requested block */
2259		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
2260			/* block is already allocated */
2261			newblock = iblock
2262				   - le32_to_cpu(newex.ee_block)
2263				   + ext_pblock(&newex);
2264			/* number of remaining blocks in the extent */
2265			allocated = le16_to_cpu(newex.ee_len) -
2266					(iblock - le32_to_cpu(newex.ee_block));
2267			goto out;
2268		} else {
2269			BUG();
2270		}
2271	}
2272
2273	/* find extent for this block */
2274	path = ext4_ext_find_extent(inode, iblock, NULL);
2275	if (IS_ERR(path)) {
2276		err = PTR_ERR(path);
2277		path = NULL;
2278		goto out2;
2279	}
2280
2281	depth = ext_depth(inode);
2282
2283	/*
2284	 * a consistent leaf must not be empty;
2285	 * an empty leaf is possible, though, _during_ tree modification;
2286	 * this is why the assert can't be put in ext4_ext_find_extent()
2287	 */
2288	BUG_ON(path[depth].p_ext == NULL && depth != 0);
2289	eh = path[depth].p_hdr;
2290
2291	ex = path[depth].p_ext;
2292	if (ex) {
2293		unsigned long ee_block = le32_to_cpu(ex->ee_block);
2294		ext4_fsblk_t ee_start = ext_pblock(ex);
2295		unsigned short ee_len;
2296
2297		/*
2298		 * Uninitialized extents are treated as holes, except that
2299		 * we split out initialized portions during a write.
2300		 */
2301		ee_len = ext4_ext_get_actual_len(ex);
2302		/* if found extent covers block, simply return it */
2303		if (iblock >= ee_block && iblock < ee_block + ee_len) {
2304			newblock = iblock - ee_block + ee_start;
2305			/* number of remaining blocks in the extent */
2306			allocated = ee_len - (iblock - ee_block);
2307			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
2308					ee_block, ee_len, newblock);
2309
2310			/* Do not put uninitialized extent in the cache */
2311			if (!ext4_ext_is_uninitialized(ex)) {
2312				ext4_ext_put_in_cache(inode, ee_block,
2313							ee_len, ee_start,
2314							EXT4_EXT_CACHE_EXTENT);
2315				goto out;
2316			}
2317			if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2318				goto out;
2319			if (!create)
2320				goto out2;
2321
2322			ret = ext4_ext_convert_to_initialized(handle, inode,
2323								path, iblock,
2324								max_blocks);
2325			if (ret <= 0)
2326				goto out2;
2327			else
2328				allocated = ret;
2329			goto outnew;
2330		}
2331	}
2332
2333	/*
2334	 * requested block isn't allocated yet;
2335	 * we cannot allocate it when the create flag is zero
2336	 */
2337	if (!create) {
2338		/*
2339		 * put just found gap into cache to speed up
2340		 * subsequent requests
2341		 */
2342		ext4_ext_put_gap_in_cache(inode, path, iblock);
2343		goto out2;
2344	}
2345	/*
2346	 * Okay, we need to do block allocation.  Lazily initialize the block
2347	 * allocation info here if necessary.
2348	 */
2349	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2350		ext4_init_block_alloc_info(inode);
2351
2352	/* allocate new block */
2353	goal = ext4_ext_find_goal(inode, path, iblock);
2354
2355	/*
2356	 * See if request is beyond maximum number of blocks we can have in
2357	 * a single extent. For an initialized extent this limit is
2358	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2359	 * EXT_UNINIT_MAX_LEN.
2360	 */
2361	if (max_blocks > EXT_INIT_MAX_LEN &&
2362	    create != EXT4_CREATE_UNINITIALIZED_EXT)
2363		max_blocks = EXT_INIT_MAX_LEN;
2364	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2365		 create == EXT4_CREATE_UNINITIALIZED_EXT)
2366		max_blocks = EXT_UNINIT_MAX_LEN;
2367
2368	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2369	newex.ee_block = cpu_to_le32(iblock);
2370	newex.ee_len = cpu_to_le16(max_blocks);
2371	err = ext4_ext_check_overlap(inode, &newex, path);
2372	if (err)
2373		allocated = le16_to_cpu(newex.ee_len);
2374	else
2375		allocated = max_blocks;
2376	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
2377	if (!newblock)
2378		goto out2;
2379	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2380			goal, newblock, allocated);
2381
2382	/* try to insert new extent into found leaf and return */
2383	ext4_ext_store_pblock(&newex, newblock);
2384	newex.ee_len = cpu_to_le16(allocated);
2385	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2386		ext4_ext_mark_uninitialized(&newex);
2387	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2388	if (err) {
2389		/* free data blocks we just allocated */
2390		ext4_free_blocks(handle, inode, ext_pblock(&newex),
2391					le16_to_cpu(newex.ee_len));
2392		goto out2;
2393	}
2394
2395	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2396		EXT4_I(inode)->i_disksize = inode->i_size;
2397
2398	/* previous routine could use block we allocated */
2399	newblock = ext_pblock(&newex);
2400outnew:
2401	__set_bit(BH_New, &bh_result->b_state);
2402
2403	/* Cache only when it is _not_ an uninitialized extent */
2404	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2405		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2406						EXT4_EXT_CACHE_EXTENT);
2407out:
2408	if (allocated > max_blocks)
2409		allocated = max_blocks;
2410	ext4_ext_show_leaf(inode, path);
2411	__set_bit(BH_Mapped, &bh_result->b_state);
2412	bh_result->b_bdev = inode->i_sb->s_bdev;
2413	bh_result->b_blocknr = newblock;
2414out2:
2415	if (path) {
2416		ext4_ext_drop_refs(path);
2417		kfree(path);
2418	}
2419	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2420
2421	return err ? err : allocated;
2422}
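
/*
 * Illustrative sketch, not part of the driver: how a caller might map
 * a single logical block through ext4_ext_get_blocks() for a read.
 * With create == 0 the function never allocates: a hole yields 0 and
 * the buffer head stays unmapped, while a hit returns the number of
 * contiguous blocks mapped and fills in bh->b_blocknr.  The wrapper
 * name is made up.
 */
#if 0
static int toy_lookup_block(handle_t *handle, struct inode *inode,
			    ext4_fsblk_t iblock, struct buffer_head *bh)
{
	int ret;

	/* create == 0, extend_disksize == 0: pure lookup */
	ret = ext4_ext_get_blocks(handle, inode, iblock, 1, bh, 0, 0);
	if (ret < 0)
		return ret;	/* -EIO, -ENOMEM, ... */
	if (ret == 0)
		return 0;	/* hole: BH_Mapped not set */
	return ret;		/* bh->b_blocknr holds the physical block */
}
#endif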
2423
2424void ext4_ext_truncate(struct inode *inode, struct page *page)
2425{
2426	struct address_space *mapping = inode->i_mapping;
2427	struct super_block *sb = inode->i_sb;
2428	unsigned long last_block;
2429	handle_t *handle;
2430	int err = 0;
2431
2432	/*
2433	 * the first extent we free will probably be the last one in its block
2434	 */
2435	err = ext4_writepage_trans_blocks(inode) + 3;
2436	handle = ext4_journal_start(inode, err);
2437	if (IS_ERR(handle)) {
2438		if (page) {
2439			clear_highpage(page);
2440			flush_dcache_page(page);
2441			unlock_page(page);
2442			page_cache_release(page);
2443		}
2444		return;
2445	}
2446
2447	if (page)
2448		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2449
2450	mutex_lock(&EXT4_I(inode)->truncate_mutex);
2451	ext4_ext_invalidate_cache(inode);
2452
2453	/*
2454	 * TODO: optimization is possible here.
2455	 * Probably we need not scan at all,
2456	 * because page truncation is enough.
2457	 */
2458	if (ext4_orphan_add(handle, inode))
2459		goto out_stop;
2460
2461	/* we have to know where to truncate from, in case we crash */
2462	EXT4_I(inode)->i_disksize = inode->i_size;
2463	ext4_mark_inode_dirty(handle, inode);
2464
2465	last_block = (inode->i_size + sb->s_blocksize - 1)
2466			>> EXT4_BLOCK_SIZE_BITS(sb);
2467	err = ext4_ext_remove_space(inode, last_block);
2468
2469	/* In a multi-transaction truncate, we only make the final
2470	 * transaction synchronous.
2471	 */
2472	if (IS_SYNC(inode))
2473		handle->h_sync = 1;
2474
2475out_stop:
2476	/*
2477	 * If this was a simple ftruncate() and the file will remain alive,
2478	 * then we need to clear up the orphan record which we created above.
2479	 * However, if this was a real unlink then we were called by
2480	 * ext4_delete_inode(), and we allow that function to clean up the
2481	 * orphan info for us.
2482	 */
2483	if (inode->i_nlink)
2484		ext4_orphan_del(handle, inode);
2485
2486	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2487	ext4_journal_stop(handle);
2488}
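
/*
 * Worked example, illustrative only: with 4K blocks
 * (EXT4_BLOCK_SIZE_BITS(sb) == 12) and i_size == 10000, last_block =
 * (10000 + 4095) >> 12 == 3, so ext4_ext_remove_space() frees logical
 * blocks 3 and up, while block 2 keeps the partially used tail that
 * ext4_block_truncate_page() zeroed beyond i_size.
 */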
2489
2490/*
2491 * ext4_ext_writepage_trans_blocks:
2492 * calculate the max number of blocks we could modify
2493 * while allocating a new block for an inode
2494 */
2495int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
2496{
2497	int needed;
2498
2499	needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2500
2501	/* each insert counts the sb buffer, but it is shared: charge it only once */
2502	needed = needed * num - (num - 1);
2503
2504#ifdef CONFIG_QUOTA
2505	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2506#endif
2507
2508	return needed;
2509}
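
/*
 * Worked example, illustrative only: if a single insert costs, say,
 * needed = 7 credits including the one superblock buffer, then num = 3
 * inserts cost 7 * 3 - (3 - 1) = 19 credits, since the superblock
 * buffer is shared by all three inserts instead of being counted
 * three times.
 */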
2510
2511/*
2512 * preallocate space for a file. This implements ext4's fallocate inode
2513 * operation, which is reached from the sys_fallocate system call.
2514 * For block-mapped files, posix_fallocate should fall back to writing
2515 * zeroes to the required new blocks (the behavior expected of file
2516 * systems that do not support the fallocate() system call).
2517 */
2518long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
2519{
2520	handle_t *handle;
2521	ext4_fsblk_t block, max_blocks;
2522	ext4_fsblk_t nblocks = 0;
2523	int ret = 0;
2524	int ret2 = 0;
2525	int retries = 0;
2526	struct buffer_head map_bh;
2527	unsigned int credits, blkbits = inode->i_blkbits;
2528
2529	/*
2530	 * currently supporting (pre)allocate mode for extent-based
2531	 * files _only_
2532	 */
2533	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
2534		return -EOPNOTSUPP;
2535
2536	/* preallocation to directories is currently not supported */
2537	if (S_ISDIR(inode->i_mode))
2538		return -ENODEV;
2539
2540	block = offset >> blkbits;
2541	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
2542			- block;
2543
2544	/*
2545	 * credits to insert 1 extent into extent tree + buffers to be able to
2546	 * modify 1 super block, 1 block bitmap and 1 group descriptor.
2547	 */
2548	credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
2549retry:
2550	while (ret >= 0 && ret < max_blocks) {
2551		block = block + ret;
2552		max_blocks = max_blocks - ret;
2553		handle = ext4_journal_start(inode, credits);
2554		if (IS_ERR(handle)) {
2555			ret = PTR_ERR(handle);
2556			break;
2557		}
2558
2559		ret = ext4_ext_get_blocks(handle, inode, block,
2560					  max_blocks, &map_bh,
2561					  EXT4_CREATE_UNINITIALIZED_EXT, 0);
2562		WARN_ON(!ret);
2563		if (!ret) {
2564			ext4_error(inode->i_sb, "ext4_fallocate",
2565				   "ext4_ext_get_blocks returned 0! inode#%lu"
2566				   ", block=%llu, max_blocks=%llu",
2567				   inode->i_ino, block, max_blocks);
2568			ret = -EIO;
2569			ext4_mark_inode_dirty(handle, inode);
2570			ret2 = ext4_journal_stop(handle);
2571			break;
2572		}
2573		if (ret > 0) {
2574			/* block is unsigned, so only wrap past zero is possible */
2575			if ((block + ret) < block) {
2576				ret = -EIO;
2577				ext4_mark_inode_dirty(handle, inode);
2578				ret2 = ext4_journal_stop(handle);
2579				break;
2580			}
2581			if (buffer_new(&map_bh) && ((block + ret) >
2582			    (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
2583			    >> blkbits)))
2584				nblocks = nblocks + ret;
2585		}
2586
2587		/* Update ctime if new blocks get allocated */
2588		if (nblocks) {
2589			struct timespec now;
2590
2591			now = current_fs_time(inode->i_sb);
2592			if (!timespec_equal(&inode->i_ctime, &now))
2593				inode->i_ctime = now;
2594		}
2595
2596		ext4_mark_inode_dirty(handle, inode);
2597		ret2 = ext4_journal_stop(handle);
2598		if (ret2)
2599			break;
2600	}
2601
2602	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2603		goto retry;
2604
2605	/*
2606	 * Time to update the file size.
2607	 * Update only when preallocation was requested beyond the file size.
2608	 */
2609	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2610	    (offset + len) > i_size_read(inode)) {
2611		if (ret > 0) {
2612			/*
2613			 * if no error, we assume preallocation succeeded
2614			 * completely
2615			 */
2616			mutex_lock(&inode->i_mutex);
2617			i_size_write(inode, offset + len);
2618			EXT4_I(inode)->i_disksize = i_size_read(inode);
2619			mutex_unlock(&inode->i_mutex);
2620		} else if (ret < 0 && nblocks) {
2621			/* Handle partial allocation scenario */
2622			loff_t newsize;
2623
2624			mutex_lock(&inode->i_mutex);
2625			newsize = (nblocks << blkbits) + i_size_read(inode);
2626			i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
2627			EXT4_I(inode)->i_disksize = i_size_read(inode);
2628			mutex_unlock(&inode->i_mutex);
2629		}
2630	}
2631
2632	return ret > 0 ? ret2 : ret;
2633}
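
/*
 * Illustrative sketch, not part of the driver: exercising this path
 * from user space.  posix_fallocate() reaches sys_fallocate() on
 * kernels that provide it, and glibc otherwise falls back to writing
 * zeroes, the behavior the comment above expects for block-mapped
 * files.  Note that posix_fallocate() returns an error number rather
 * than setting errno.  The file name is made up.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int err;
	int fd = open("/mnt/ext4/prealloc.dat", O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return 1;
	/* preallocate 16 MiB from offset 0; i_size is extended (mode 0) */
	err = posix_fallocate(fd, 0, 16 * 1024 * 1024);
	if (err)
		fprintf(stderr, "posix_fallocate: %d\n", err);
	close(fd);
	return err ? 1 : 0;
}
#endif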
2634