/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "ext2_fs.h"
#include "ext2fs.h"

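/*
 * Private context used while iterating over the blocks of an inode.
 * It carries the caller's callback and flags, the running logical
 * block count (bcount), and the scratch buffers used to read the
 * indirect, doubly-indirect and triply-indirect blocks.
 */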
struct block_context {
	ext2_filsys	fs;
	int (*func)(ext2_filsys	fs,
		    blk64_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk64_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;
	int		bsize;
	int		flags;
	errcode_t	errcode;
	char	*ind_buf;
	char	*dind_buf;
	char	*tind_buf;
	void	*priv_data;
};

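/*
 * Enforce BLOCK_FLAG_READ_ONLY: if the callback reports BLOCK_CHANGED
 * while the iteration is read-only, record EXT2_ET_RO_BLOCK_ITERATE
 * and stop with BLOCK_ABORT | BLOCK_ERROR.
 */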
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)

#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)

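/*
 * Iterate over a single indirect block.  The indirect block itself is
 * reported to the callback before its entries (or after them when
 * BLOCK_FLAG_DEPTH_TRAVERSE is set, and not at all for
 * BLOCK_FLAG_DATA_ONLY); each of the blocksize/4 entries is then passed
 * to the callback, and the block is written back if anything changed.
 */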
static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
		blk64 = *ind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				goto skip_sparse;
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
		skip_sparse:
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *ind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

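/*
 * Iterate over a doubly-indirect block: report the block itself subject
 * to the same flags as above, then recurse into each referenced
 * indirect block via block_iterate_ind().
 */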
static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		blk64 = *dind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *dind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

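/*
 * Iterate over a triply-indirect block: report the block itself subject
 * to the same flags as above, then recurse into each referenced
 * doubly-indirect block via block_iterate_dind().
 */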
static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		blk64 = *tind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit*limit;
		return ret;
	}
	if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *tind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

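/*
 * ext2fs_block_iterate3() is the 64-bit-capable entry point: it invokes
 * func() for each block of the inode (and, depending on the flags, for
 * each metadata block), handling both extent-mapped and indirect-mapped
 * files.  The sketch below shows one plausible read-only caller; the
 * callback name and variables are illustrative only and not part of
 * this library:
 *
 *	static int count_block(ext2_filsys fs, blk64_t *blocknr,
 *			       e2_blkcnt_t blockcnt, blk64_t ref_blk,
 *			       int ref_offset, void *priv_data)
 *	{
 *		blk64_t *count = (blk64_t *) priv_data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	...
 *	blk64_t nblocks = 0;
 *	retval = ext2fs_block_iterate3(fs, ino, BLOCK_FLAG_READ_ONLY,
 *				       NULL, count_block, &nblocks);
 */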
errcode_t ext2fs_block_iterate3(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk64_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk64_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;
	blk64_t	blk64;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * An inode with inline data has no blocks over which to
	 * iterate, so return an error code indicating this fact.
	 */
	if (inode.i_flags & EXT4_INLINE_DATA_FL)
		return EXT2_ET_INLINE_DATA_CANT_ITERATE;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			blk64 = inode.osd1.hurd1.h_i_translator;
			ret |= (*ctx.func)(fs, &blk64,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	if (inode.i_flags & EXT4_EXTENTS_FL) {
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent, next;
		e2_blkcnt_t		blockcnt = 0;
		blk64_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			if (op == EXT2_EXTENT_CURRENT)
				ctx.errcode = 0;
			else
				ctx.errcode = ext2fs_extent_get(handle, op,
								&extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
			next_block_set:
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					if (blk)
						goto next_block_set;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
					if (ret & BLOCK_ABORT)
						break;
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;

			/*
			 * Get the next extent before we start messing
			 * with the current extent
			 */
			retval = ext2fs_extent_get(handle, op, &next);

#if 0
			printf("lblk %llu pblk %llu len %d blockcnt %llu\n",
			       extent.e_lblk, extent.e_pblk,
			       extent.e_len, blockcnt);
#endif
			if (extent.e_lblk + extent.e_len <= (blk64_t) blockcnt)
				continue;
			if (extent.e_lblk > (blk64_t) blockcnt)
				blockcnt = extent.e_lblk;
			j = blockcnt - extent.e_lblk;
			blk += j;
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       new_blk, uninit);
					if (ctx.errcode)
						goto extent_done;
				}
				if (ret & BLOCK_ABORT)
					goto extent_done;
			}
			if (retval == 0) {
				extent = next;
				op = EXT2_EXTENT_CURRENT;
			}
		}

	extent_done:
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR; /* ctx.errcode is always valid here */
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			blk64 = inode.i_block[i];
			ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
					   priv_data);
			inode.i_block[i] = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}

/*
 * Emulate the old ext2fs_block_iterate function!
 */

struct xlate64 {
	int (*func)(ext2_filsys fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	blockcnt,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	void *real_private;
};

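/*
 * Adapt the 64-bit callback interface used by ext2fs_block_iterate3()
 * to the 32-bit callback expected by ext2fs_block_iterate2() callers;
 * block numbers are narrowed to blk_t on the way through.
 */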
static int xlate64_func(ext2_filsys fs, blk64_t	*blocknr,
			e2_blkcnt_t blockcnt, blk64_t ref_blk,
			int ref_offset, void *priv_data)
{
	struct xlate64 *xl = (struct xlate64 *) priv_data;
	int		ret;
	blk_t		block32 = *blocknr;

	ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
			     xl->real_private);
	*blocknr = block32;
	return ret;
}

errcode_t ext2fs_block_iterate2(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	struct xlate64 xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate3(fs, ino, flags, block_buf,
				     xlate64_func, &xl);
}


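/*
 * A second translation layer supports the original ext2fs_block_iterate()
 * interface, whose callback receives only a 32-bit block number and an
 * int block count.
 */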
struct xlate {
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;
};

#ifdef __TURBOC__
 #pragma argsused
#endif
static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
		      blk_t ref_block EXT2FS_ATTR((unused)),
		      int ref_offset EXT2FS_ATTR((unused)),
		      void *priv_data)
{
	struct xlate *xl = (struct xlate *) priv_data;

	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
}

errcode_t ext2fs_block_iterate(ext2_filsys fs,
			       ext2_ino_t ino,
			       int	flags,
			       char *block_buf,
			       int (*func)(ext2_filsys fs,
					   blk_t	*blocknr,
					   int	blockcnt,
					   void	*priv_data),
			       void *priv_data)
{
	struct xlate xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
				     block_buf, xlate_func, &xl);
}