block.c revision 4efbac6fed75c29d3d5f1b676b932754653a2ac5
1/*
2 * block.c --- iterate over all blocks in an inode
3 *
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
5 *
6 * %Begin-Header%
7 * This file may be redistributed under the terms of the GNU Public
8 * License.
9 * %End-Header%
10 */
11
12#include <stdio.h>
13#include <string.h>
14#if HAVE_UNISTD_H
15#include <unistd.h>
16#endif
17
18#include "ext2_fs.h"
19#include "ext2fs.h"
20
/*
 * State shared by ext2fs_block_iterate3() and the indirect-block
 * walkers below.  One instance lives on the iterator's stack for the
 * duration of a single inode traversal.
 */
struct block_context {
	ext2_filsys	fs;
	/* User callback; returns a mask of BLOCK_CHANGED/BLOCK_ABORT/... */
	int (*func)(ext2_filsys	fs,
		    blk64_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk64_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	/* Running logical block count handed to the callback */
	e2_blkcnt_t	bcount;
	int		bsize;
	/* BLOCK_FLAG_* bits passed in by the caller */
	int		flags;
	/* Sticky error code reported back when BLOCK_ERROR is set */
	errcode_t	errcode;
	/* Scratch buffers, one filesystem block each, laid out contiguously */
	char	*ind_buf;
	char	*dind_buf;
	char	*tind_buf;
	void	*priv_data;
};
38
/*
 * If the iteration was opened read-only (BLOCK_FLAG_READ_ONLY) but the
 * callback reported BLOCK_CHANGED, record EXT2_ET_RO_BLOCK_ITERATE in
 * the context and return immediately with abort/error bits set.
 */
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)
48
/*
 * Same read-only violation check as check_for_ro_violation_return(),
 * but jumps to a cleanup label instead of returning, for use where
 * resources (e.g. an extent handle) must be released first.
 */
#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)
58
59static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
60			     int ref_offset, struct block_context *ctx)
61{
62	int	ret = 0, changed = 0;
63	int	i, flags, limit, offset;
64	blk_t	*block_nr;
65	blk64_t	blk64;
66
67	limit = ctx->fs->blocksize >> 2;
68	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
69	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
70		blk64 = *ind_block;
71		ret = (*ctx->func)(ctx->fs, &blk64,
72				   BLOCK_COUNT_IND, ref_block,
73				   ref_offset, ctx->priv_data);
74		*ind_block = blk64;
75	}
76	check_for_ro_violation_return(ctx, ret);
77	if (!*ind_block || (ret & BLOCK_ABORT)) {
78		ctx->bcount += limit;
79		return ret;
80	}
81	if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
82	    *ind_block < ctx->fs->super->s_first_data_block) {
83		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
84		ret |= BLOCK_ERROR;
85		return ret;
86	}
87	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
88					     ctx->ind_buf);
89	if (ctx->errcode) {
90		ret |= BLOCK_ERROR;
91		return ret;
92	}
93
94	block_nr = (blk_t *) ctx->ind_buf;
95	offset = 0;
96	if (ctx->flags & BLOCK_FLAG_APPEND) {
97		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
98			blk64 = *block_nr;
99			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
100					     *ind_block, offset,
101					     ctx->priv_data);
102			*block_nr = blk64;
103			changed	|= flags;
104			if (flags & BLOCK_ABORT) {
105				ret |= BLOCK_ABORT;
106				break;
107			}
108			offset += sizeof(blk_t);
109		}
110	} else {
111		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
112			if (*block_nr == 0)
113				continue;
114			blk64 = *block_nr;
115			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
116					     *ind_block, offset,
117					     ctx->priv_data);
118			*block_nr = blk64;
119			changed	|= flags;
120			if (flags & BLOCK_ABORT) {
121				ret |= BLOCK_ABORT;
122				break;
123			}
124			offset += sizeof(blk_t);
125		}
126	}
127	check_for_ro_violation_return(ctx, changed);
128	if (changed & BLOCK_CHANGED) {
129		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
130						      ctx->ind_buf);
131		if (ctx->errcode)
132			ret |= BLOCK_ERROR | BLOCK_ABORT;
133	}
134	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
135	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
136	    !(ret & BLOCK_ABORT)) {
137		blk64 = *ind_block;
138		ret |= (*ctx->func)(ctx->fs, &blk64,
139				    BLOCK_COUNT_IND, ref_block,
140				    ref_offset, ctx->priv_data);
141		*ind_block = blk64;
142	}
143	check_for_ro_violation_return(ctx, ret);
144	return ret;
145}
146
147static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
148			      int ref_offset, struct block_context *ctx)
149{
150	int	ret = 0, changed = 0;
151	int	i, flags, limit, offset;
152	blk_t	*block_nr;
153	blk64_t	blk64;
154
155	limit = ctx->fs->blocksize >> 2;
156	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
157			    BLOCK_FLAG_DATA_ONLY))) {
158		blk64 = *dind_block;
159		ret = (*ctx->func)(ctx->fs, &blk64,
160				   BLOCK_COUNT_DIND, ref_block,
161				   ref_offset, ctx->priv_data);
162		*dind_block = blk64;
163	}
164	check_for_ro_violation_return(ctx, ret);
165	if (!*dind_block || (ret & BLOCK_ABORT)) {
166		ctx->bcount += limit*limit;
167		return ret;
168	}
169	if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
170	    *dind_block < ctx->fs->super->s_first_data_block) {
171		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
172		ret |= BLOCK_ERROR;
173		return ret;
174	}
175	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
176					     ctx->dind_buf);
177	if (ctx->errcode) {
178		ret |= BLOCK_ERROR;
179		return ret;
180	}
181
182	block_nr = (blk_t *) ctx->dind_buf;
183	offset = 0;
184	if (ctx->flags & BLOCK_FLAG_APPEND) {
185		for (i = 0; i < limit; i++, block_nr++) {
186			flags = block_iterate_ind(block_nr,
187						  *dind_block, offset,
188						  ctx);
189			changed |= flags;
190			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
191				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
192				break;
193			}
194			offset += sizeof(blk_t);
195		}
196	} else {
197		for (i = 0; i < limit; i++, block_nr++) {
198			if (*block_nr == 0) {
199				ctx->bcount += limit;
200				continue;
201			}
202			flags = block_iterate_ind(block_nr,
203						  *dind_block, offset,
204						  ctx);
205			changed |= flags;
206			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
207				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
208				break;
209			}
210			offset += sizeof(blk_t);
211		}
212	}
213	check_for_ro_violation_return(ctx, changed);
214	if (changed & BLOCK_CHANGED) {
215		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
216						      ctx->dind_buf);
217		if (ctx->errcode)
218			ret |= BLOCK_ERROR | BLOCK_ABORT;
219	}
220	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
221	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
222	    !(ret & BLOCK_ABORT)) {
223		blk64 = *dind_block;
224		ret |= (*ctx->func)(ctx->fs, &blk64,
225				    BLOCK_COUNT_DIND, ref_block,
226				    ref_offset, ctx->priv_data);
227		*dind_block = blk64;
228	}
229	check_for_ro_violation_return(ctx, ret);
230	return ret;
231}
232
233static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
234			      int ref_offset, struct block_context *ctx)
235{
236	int	ret = 0, changed = 0;
237	int	i, flags, limit, offset;
238	blk_t	*block_nr;
239	blk64_t	blk64;
240
241	limit = ctx->fs->blocksize >> 2;
242	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
243			    BLOCK_FLAG_DATA_ONLY))) {
244		blk64 = *tind_block;
245		ret = (*ctx->func)(ctx->fs, &blk64,
246				   BLOCK_COUNT_TIND, ref_block,
247				   ref_offset, ctx->priv_data);
248		*tind_block = blk64;
249	}
250	check_for_ro_violation_return(ctx, ret);
251	if (!*tind_block || (ret & BLOCK_ABORT)) {
252		ctx->bcount += limit*limit*limit;
253		return ret;
254	}
255	if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
256	    *tind_block < ctx->fs->super->s_first_data_block) {
257		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
258		ret |= BLOCK_ERROR;
259		return ret;
260	}
261	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
262					     ctx->tind_buf);
263	if (ctx->errcode) {
264		ret |= BLOCK_ERROR;
265		return ret;
266	}
267
268	block_nr = (blk_t *) ctx->tind_buf;
269	offset = 0;
270	if (ctx->flags & BLOCK_FLAG_APPEND) {
271		for (i = 0; i < limit; i++, block_nr++) {
272			flags = block_iterate_dind(block_nr,
273						   *tind_block,
274						   offset, ctx);
275			changed |= flags;
276			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
277				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
278				break;
279			}
280			offset += sizeof(blk_t);
281		}
282	} else {
283		for (i = 0; i < limit; i++, block_nr++) {
284			if (*block_nr == 0) {
285				ctx->bcount += limit*limit;
286				continue;
287			}
288			flags = block_iterate_dind(block_nr,
289						   *tind_block,
290						   offset, ctx);
291			changed |= flags;
292			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
293				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
294				break;
295			}
296			offset += sizeof(blk_t);
297		}
298	}
299	check_for_ro_violation_return(ctx, changed);
300	if (changed & BLOCK_CHANGED) {
301		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
302						      ctx->tind_buf);
303		if (ctx->errcode)
304			ret |= BLOCK_ERROR | BLOCK_ABORT;
305	}
306	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
307	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
308	    !(ret & BLOCK_ABORT)) {
309		blk64 = *tind_block;
310		ret |= (*ctx->func)(ctx->fs, &blk64,
311				    BLOCK_COUNT_TIND, ref_block,
312				    ref_offset, ctx->priv_data);
313		*tind_block = blk64;
314	}
315	check_for_ro_violation_return(ctx, ret);
316	return ret;
317}
318
/*
 * Iterate func over every block of inode ino, covering the HURD
 * translator block (if any), extent-mapped blocks for EXT4_EXTENTS_FL
 * inodes, and otherwise the direct, indirect, doubly- and
 * triply-indirect blocks.  block_buf, if non-NULL, must be at least
 * 3 * fs->blocksize bytes of scratch space; otherwise it is allocated
 * here and freed before returning.  Returns 0 on success or on a
 * callback abort without error; otherwise returns the first error
 * recorded in ctx.errcode.
 */
errcode_t ext2fs_block_iterate3(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk64_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk64_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;
	blk64_t	blk64;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	/* Number of 32-bit block pointers per filesystem block */
	limit = fs->blocksize >> 2;

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		/* Caller supplied scratch space (3 blocks: ind/dind/tind) */
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			blk64 = inode.osd1.hurd1.h_i_translator;
			ret |= (*ctx.func)(fs, &blk64,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	/* Extent-mapped inode: walk the extent tree instead of i_block */
	if (inode.i_flags & EXT4_EXTENTS_FL) {
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent;
		e2_blkcnt_t		blockcnt = 0;
		blk64_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
			if (ctx.errcode) {
				/* Real errors terminate the walk */
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				/*
				 * Ran off the end of the tree.  In append
				 * mode, keep offering block 0 past EOF so
				 * the callback can grow the file via
				 * ext2fs_extent_set_bmap().
				 */
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					continue;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				/* Interior (index) node of the extent tree */
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				/*
				 * Report interior nodes on the first visit
				 * for breadth-first callers, or on the
				 * second visit for depth-first callers.
				 */
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
				}
				continue;
			}
			/* Leaf extent: visit each mapped block in turn */
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       new_blk, uninit);
					if (ctx.errcode)
						goto extent_errout;
				}
				if (ret & BLOCK_ABORT)
					break;
			}
		}

	extent_errout:
		/*
		 * Normal completion falls through here as well: in that
		 * case ctx.errcode is 0, so the BLOCK_ERROR bit set below
		 * still makes the final return map to success (0).
		 */
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR | BLOCK_ABORT;
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			blk64 = inode.i_block[i];
			ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
					   priv_data);
			inode.i_block[i] = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		/* Sparse indirect tree: keep the logical count in step */
		ctx.bcount += limit;
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	/* Callback rewrote block pointers in i_block[]: persist the inode */
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}
530
531/*
532 * Emulate the old ext2fs_block_iterate function!
533 */
534
/*
 * Adapter state for ext2fs_block_iterate2(): wraps a 32-bit-blocknr
 * callback so it can be driven by the 64-bit ext2fs_block_iterate3().
 */
struct xlate64 {
	/* Caller's 32-bit callback */
	int (*func)(ext2_filsys fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	blockcnt,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	/* Caller's original private data, passed through untouched */
	void *real_private;
};
544
545static int xlate64_func(ext2_filsys fs, blk64_t	*blocknr,
546			e2_blkcnt_t blockcnt, blk64_t ref_blk,
547			int ref_offset, void *priv_data)
548{
549	struct xlate64 *xl = (struct xlate64 *) priv_data;
550	int		ret;
551	blk_t		block32 = *blocknr;
552
553	ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
554			     xl->real_private);
555	*blocknr = block32;
556	return ret;
557}
558
559errcode_t ext2fs_block_iterate2(ext2_filsys fs,
560				ext2_ino_t ino,
561				int	flags,
562				char *block_buf,
563				int (*func)(ext2_filsys fs,
564					    blk_t	*blocknr,
565					    e2_blkcnt_t	blockcnt,
566					    blk_t	ref_blk,
567					    int		ref_offset,
568					    void	*priv_data),
569				void *priv_data)
570{
571	struct xlate64 xl;
572
573	xl.real_private = priv_data;
574	xl.func = func;
575
576	return ext2fs_block_iterate3(fs, ino, flags, block_buf,
577				     xlate64_func, &xl);
578}
579
580
/*
 * Adapter state for the original ext2fs_block_iterate() interface,
 * whose callback takes a plain int block count and no ref_blk/offset.
 */
struct xlate {
	/* Caller's original-style callback */
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	/* Caller's original private data, passed through untouched */
	void *real_private;
};
588
589#ifdef __TURBOC__
590 #pragma argsused
591#endif
592static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
593		      blk_t ref_block EXT2FS_ATTR((unused)),
594		      int ref_offset EXT2FS_ATTR((unused)),
595		      void *priv_data)
596{
597	struct xlate *xl = (struct xlate *) priv_data;
598
599	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
600}
601
602errcode_t ext2fs_block_iterate(ext2_filsys fs,
603			       ext2_ino_t ino,
604			       int	flags,
605			       char *block_buf,
606			       int (*func)(ext2_filsys fs,
607					   blk_t	*blocknr,
608					   int	blockcnt,
609					   void	*priv_data),
610			       void *priv_data)
611{
612	struct xlate xl;
613
614	xl.real_private = priv_data;
615	xl.func = func;
616
617	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
618				     block_buf, xlate_func, &xl);
619}
620
621