block.c revision 8e2399d57ac2bec1830e27deeeac66002d81001c
1/*
2 * block.c --- iterate over all blocks in an inode
3 *
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
5 *
6 * %Begin-Header%
7 * This file may be redistributed under the terms of the GNU Public
8 * License.
9 * %End-Header%
10 */
11
12#include <stdio.h>
13#include <string.h>
14#if HAVE_UNISTD_H
15#include <unistd.h>
16#endif
17
18#include "ext2_fs.h"
19#include "ext2fs.h"
20
/*
 * Per-iteration state shared by ext2fs_block_iterate2() and the
 * indirect/dind/tind helper walkers below.
 */
struct block_context {
	ext2_filsys	fs;		/* filesystem being iterated */
	int (*func)(ext2_filsys	fs,	/* user callback, invoked per block */
		    blk_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;		/* running logical block count */
	int		bsize;		/* NOTE(review): never set/read in this
					 * file — presumably historical */
	int		flags;		/* BLOCK_FLAG_* iteration flags */
	errcode_t	errcode;	/* first error encountered, if any */
	char	*ind_buf;		/* scratch buffer: indirect block */
	char	*dind_buf;		/* scratch buffer: double-indirect */
	char	*tind_buf;		/* scratch buffer: triple-indirect */
	void	*priv_data;		/* opaque pointer passed to func */
};
38
/*
 * If the iteration was started with BLOCK_FLAG_READ_ONLY but a callback
 * reported BLOCK_CHANGED, record EXT2_ET_RO_BLOCK_ITERATE in the context
 * and return from the *enclosing function* with BLOCK_ABORT | BLOCK_ERROR
 * added to ret.  Expands to a statement; safe in if/else via do-while(0).
 */
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)
48
/*
 * Same read-only-violation check as check_for_ro_violation_return(), but
 * jumps to the given cleanup label instead of returning — used where the
 * caller must release resources (e.g. an open extent handle) first.
 */
#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)
58
59static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
60			     int ref_offset, struct block_context *ctx)
61{
62	int	ret = 0, changed = 0;
63	int	i, flags, limit, offset;
64	blk_t	*block_nr;
65
66	limit = ctx->fs->blocksize >> 2;
67	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
68	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
69		ret = (*ctx->func)(ctx->fs, ind_block,
70				   BLOCK_COUNT_IND, ref_block,
71				   ref_offset, ctx->priv_data);
72	check_for_ro_violation_return(ctx, ret);
73	if (!*ind_block || (ret & BLOCK_ABORT)) {
74		ctx->bcount += limit;
75		return ret;
76	}
77	if (*ind_block >= ctx->fs->super->s_blocks_count ||
78	    *ind_block < ctx->fs->super->s_first_data_block) {
79		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
80		ret |= BLOCK_ERROR;
81		return ret;
82	}
83	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
84					     ctx->ind_buf);
85	if (ctx->errcode) {
86		ret |= BLOCK_ERROR;
87		return ret;
88	}
89
90	block_nr = (blk_t *) ctx->ind_buf;
91	offset = 0;
92	if (ctx->flags & BLOCK_FLAG_APPEND) {
93		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
94			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
95					     *ind_block, offset,
96					     ctx->priv_data);
97			changed	|= flags;
98			if (flags & BLOCK_ABORT) {
99				ret |= BLOCK_ABORT;
100				break;
101			}
102			offset += sizeof(blk_t);
103		}
104	} else {
105		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
106			if (*block_nr == 0)
107				continue;
108			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
109					     *ind_block, offset,
110					     ctx->priv_data);
111			changed	|= flags;
112			if (flags & BLOCK_ABORT) {
113				ret |= BLOCK_ABORT;
114				break;
115			}
116			offset += sizeof(blk_t);
117		}
118	}
119	check_for_ro_violation_return(ctx, changed);
120	if (changed & BLOCK_CHANGED) {
121		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
122						      ctx->ind_buf);
123		if (ctx->errcode)
124			ret |= BLOCK_ERROR | BLOCK_ABORT;
125	}
126	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
127	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
128	    !(ret & BLOCK_ABORT))
129		ret |= (*ctx->func)(ctx->fs, ind_block,
130				    BLOCK_COUNT_IND, ref_block,
131				    ref_offset, ctx->priv_data);
132	check_for_ro_violation_return(ctx, ret);
133	return ret;
134}
135
136static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
137			      int ref_offset, struct block_context *ctx)
138{
139	int	ret = 0, changed = 0;
140	int	i, flags, limit, offset;
141	blk_t	*block_nr;
142
143	limit = ctx->fs->blocksize >> 2;
144	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
145			    BLOCK_FLAG_DATA_ONLY)))
146		ret = (*ctx->func)(ctx->fs, dind_block,
147				   BLOCK_COUNT_DIND, ref_block,
148				   ref_offset, ctx->priv_data);
149	check_for_ro_violation_return(ctx, ret);
150	if (!*dind_block || (ret & BLOCK_ABORT)) {
151		ctx->bcount += limit*limit;
152		return ret;
153	}
154	if (*dind_block >= ctx->fs->super->s_blocks_count ||
155	    *dind_block < ctx->fs->super->s_first_data_block) {
156		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
157		ret |= BLOCK_ERROR;
158		return ret;
159	}
160	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
161					     ctx->dind_buf);
162	if (ctx->errcode) {
163		ret |= BLOCK_ERROR;
164		return ret;
165	}
166
167	block_nr = (blk_t *) ctx->dind_buf;
168	offset = 0;
169	if (ctx->flags & BLOCK_FLAG_APPEND) {
170		for (i = 0; i < limit; i++, block_nr++) {
171			flags = block_iterate_ind(block_nr,
172						  *dind_block, offset,
173						  ctx);
174			changed |= flags;
175			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
176				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
177				break;
178			}
179			offset += sizeof(blk_t);
180		}
181	} else {
182		for (i = 0; i < limit; i++, block_nr++) {
183			if (*block_nr == 0) {
184				ctx->bcount += limit;
185				continue;
186			}
187			flags = block_iterate_ind(block_nr,
188						  *dind_block, offset,
189						  ctx);
190			changed |= flags;
191			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
192				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
193				break;
194			}
195			offset += sizeof(blk_t);
196		}
197	}
198	check_for_ro_violation_return(ctx, changed);
199	if (changed & BLOCK_CHANGED) {
200		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
201						      ctx->dind_buf);
202		if (ctx->errcode)
203			ret |= BLOCK_ERROR | BLOCK_ABORT;
204	}
205	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
206	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
207	    !(ret & BLOCK_ABORT))
208		ret |= (*ctx->func)(ctx->fs, dind_block,
209				    BLOCK_COUNT_DIND, ref_block,
210				    ref_offset, ctx->priv_data);
211	check_for_ro_violation_return(ctx, ret);
212	return ret;
213}
214
215static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
216			      int ref_offset, struct block_context *ctx)
217{
218	int	ret = 0, changed = 0;
219	int	i, flags, limit, offset;
220	blk_t	*block_nr;
221
222	limit = ctx->fs->blocksize >> 2;
223	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
224			    BLOCK_FLAG_DATA_ONLY)))
225		ret = (*ctx->func)(ctx->fs, tind_block,
226				   BLOCK_COUNT_TIND, ref_block,
227				   ref_offset, ctx->priv_data);
228	check_for_ro_violation_return(ctx, ret);
229	if (!*tind_block || (ret & BLOCK_ABORT)) {
230		ctx->bcount += limit*limit*limit;
231		return ret;
232	}
233	if (*tind_block >= ctx->fs->super->s_blocks_count ||
234	    *tind_block < ctx->fs->super->s_first_data_block) {
235		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
236		ret |= BLOCK_ERROR;
237		return ret;
238	}
239	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
240					     ctx->tind_buf);
241	if (ctx->errcode) {
242		ret |= BLOCK_ERROR;
243		return ret;
244	}
245
246	block_nr = (blk_t *) ctx->tind_buf;
247	offset = 0;
248	if (ctx->flags & BLOCK_FLAG_APPEND) {
249		for (i = 0; i < limit; i++, block_nr++) {
250			flags = block_iterate_dind(block_nr,
251						   *tind_block,
252						   offset, ctx);
253			changed |= flags;
254			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
255				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
256				break;
257			}
258			offset += sizeof(blk_t);
259		}
260	} else {
261		for (i = 0; i < limit; i++, block_nr++) {
262			if (*block_nr == 0) {
263				ctx->bcount += limit*limit;
264				continue;
265			}
266			flags = block_iterate_dind(block_nr,
267						   *tind_block,
268						   offset, ctx);
269			changed |= flags;
270			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
271				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
272				break;
273			}
274			offset += sizeof(blk_t);
275		}
276	}
277	check_for_ro_violation_return(ctx, changed);
278	if (changed & BLOCK_CHANGED) {
279		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
280						      ctx->tind_buf);
281		if (ctx->errcode)
282			ret |= BLOCK_ERROR | BLOCK_ABORT;
283	}
284	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
285	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
286	    !(ret & BLOCK_ABORT))
287		ret |= (*ctx->func)(ctx->fs, tind_block,
288				    BLOCK_COUNT_TIND, ref_block,
289				    ref_offset, ctx->priv_data);
290	check_for_ro_violation_return(ctx, ret);
291	return ret;
292}
293
/*
 * Iterate over every block of inode @ino, invoking @func for each one.
 * Handles the HURD translator block, extent-mapped inodes
 * (EXT4_EXTENTS_FL), and classic direct/indirect block maps.  If any
 * callback returns BLOCK_CHANGED, the modified inode is written back
 * (for the block-map case) before returning.
 *
 * @block_buf, if non-NULL, must be at least 3 * fs->blocksize bytes and
 * is used as scratch space for the ind/dind/tind buffers; otherwise a
 * buffer is allocated and freed internally.
 *
 * Returns 0 on success (including callback-requested aborts), or an
 * errcode_t if an error occurred (BLOCK_ERROR was set).
 */
errcode_t ext2fs_block_iterate2(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		/* i_size_high != 0 means > 4GB; directories reuse that
		 * field differently, so they are exempt. */
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;	/* block pointers per block */

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		/* Caller-supplied scratch space (3 blocks). */
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	/* The three scratch buffers are consecutive slices of ind_buf. */
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			ret |= (*ctx.func)(fs,
					   &inode.osd1.hurd1.h_i_translator,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	if (inode.i_flags & EXT4_EXTENTS_FL) {
		/* Extent-mapped inode: walk the extent tree instead of
		 * the classic block map. */
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent;
		e2_blkcnt_t		blockcnt = 0;
		blk_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				/* End of the extent tree reached. */
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
			next_block_set:
				/* APPEND: keep offering block 0 past EOF
				 * until the callback stops changing it. */
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					if (blk)
						goto next_block_set;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				/* Interior (index) node: visit it before
				 * its children normally, or after them
				 * (SECOND_VISIT) in depth-first mode. */
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
			/* Leaf extent: visit each physical block in the
			 * mapped range individually. */
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       (blk64_t) new_blk,
						       uninit);
					if (ctx.errcode)
						goto extent_errout;
				}
				if (ret & BLOCK_ABORT)
					break;
			}
		}

	extent_errout:
		/* Note: normal loop termination also falls through here;
		 * in that case ctx.errcode is 0, so the final return
		 * below still yields 0 despite BLOCK_ERROR being set.
		 * Extent changes were flushed via the handle, so the
		 * abort_exit inode write-back is not needed. */
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR | BLOCK_ABORT;
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		/* Holes are visited only in APPEND mode. */
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			ret |= (*ctx.func)(fs, &inode.i_block[i],
					    ctx.bcount, 0, i, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	/* Flush any callback modifications to the inode's block map. */
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}
504
505/*
506 * Emulate the old ext2fs_block_iterate function!
507 */
508
/*
 * Translation shim state: wraps an old-style (int blockcnt) callback so
 * it can be driven by ext2fs_block_iterate2()'s new-style interface.
 */
struct xlate {
	int (*func)(ext2_filsys	fs,	/* old-style user callback */
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;		/* user's private data, passed through */
};
516
517#ifdef __TURBOC__
518 #pragma argsused
519#endif
520static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
521		      blk_t ref_block EXT2FS_ATTR((unused)),
522		      int ref_offset EXT2FS_ATTR((unused)),
523		      void *priv_data)
524{
525	struct xlate *xl = (struct xlate *) priv_data;
526
527	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
528}
529
530errcode_t ext2fs_block_iterate(ext2_filsys fs,
531			       ext2_ino_t ino,
532			       int	flags,
533			       char *block_buf,
534			       int (*func)(ext2_filsys fs,
535					   blk_t	*blocknr,
536					   int	blockcnt,
537					   void	*priv_data),
538			       void *priv_data)
539{
540	struct xlate xl;
541
542	xl.real_private = priv_data;
543	xl.func = func;
544
545	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
546				     block_buf, xlate_func, &xl);
547}
548
549