block.c revision 9f8046fc6dfc13eee2f5c363214e60b533872cac
1/*
2 * block.c --- iterate over all blocks in an inode
3 *
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
5 *
6 * %Begin-Header%
7 * This file may be redistributed under the terms of the GNU Public
8 * License.
9 * %End-Header%
10 */
11
12#include <stdio.h>
13#include <string.h>
14#if HAVE_UNISTD_H
15#include <unistd.h>
16#endif
17
18#include "ext2_fs.h"
19#include "ext2fs.h"
20
/*
 * Shared state threaded through the recursive block iteration helpers
 * below on behalf of ext2fs_block_iterate2().
 */
struct block_context {
	ext2_filsys	fs;		/* filesystem being iterated */
	/* user callback, invoked once per block pointer visited */
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;		/* running logical block count */
	int		bsize;		/* NOTE(review): appears unused here */
	int		flags;		/* BLOCK_FLAG_* iteration options */
	errcode_t	errcode;	/* first error encountered, if any */
	char	*ind_buf;		/* scratch buffer: indirect block */
	char	*dind_buf;		/* scratch buffer: double-indirect */
	char	*tind_buf;		/* scratch buffer: triple-indirect */
	void	*priv_data;		/* opaque pointer passed to func */
};
38
39static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
40			     int ref_offset, struct block_context *ctx)
41{
42	int	ret = 0, changed = 0;
43	int	i, flags, limit, offset;
44	blk_t	*block_nr;
45
46	limit = ctx->fs->blocksize >> 2;
47	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
48	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
49		ret = (*ctx->func)(ctx->fs, ind_block,
50				   BLOCK_COUNT_IND, ref_block,
51				   ref_offset, ctx->priv_data);
52	if (!*ind_block || (ret & BLOCK_ABORT)) {
53		ctx->bcount += limit;
54		return ret;
55	}
56	if (*ind_block >= ctx->fs->super->s_blocks_count ||
57	    *ind_block < ctx->fs->super->s_first_data_block) {
58		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
59		ret |= BLOCK_ERROR;
60		return ret;
61	}
62	if (ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) {
63		ctx->errcode = 0;
64		memset(ctx->ind_buf, 0, ctx->fs->blocksize);
65	} else
66		ctx->errcode = io_channel_read_blk(ctx->fs->io, *ind_block,
67						   1, ctx->ind_buf);
68	if (ctx->errcode) {
69		ret |= BLOCK_ERROR;
70		return ret;
71	}
72	if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
73			      EXT2_FLAG_SWAP_BYTES_READ)) {
74		block_nr = (blk_t *) ctx->ind_buf;
75		for (i = 0; i < limit; i++, block_nr++)
76			*block_nr = ext2fs_swab32(*block_nr);
77	}
78	block_nr = (blk_t *) ctx->ind_buf;
79	offset = 0;
80	if (ctx->flags & BLOCK_FLAG_APPEND) {
81		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
82			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
83					     *ind_block, offset,
84					     ctx->priv_data);
85			changed	|= flags;
86			if (flags & BLOCK_ABORT) {
87				ret |= BLOCK_ABORT;
88				break;
89			}
90			offset += sizeof(blk_t);
91		}
92	} else {
93		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
94			if (*block_nr == 0)
95				continue;
96			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
97					     *ind_block, offset,
98					     ctx->priv_data);
99			changed	|= flags;
100			if (flags & BLOCK_ABORT) {
101				ret |= BLOCK_ABORT;
102				break;
103			}
104			offset += sizeof(blk_t);
105		}
106	}
107	if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
108	    (changed & BLOCK_CHANGED)) {
109		if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
110				      EXT2_FLAG_SWAP_BYTES_WRITE)) {
111			block_nr = (blk_t *) ctx->ind_buf;
112			for (i = 0; i < limit; i++, block_nr++)
113				*block_nr = ext2fs_swab32(*block_nr);
114		}
115		ctx->errcode = io_channel_write_blk(ctx->fs->io, *ind_block,
116						    1, ctx->ind_buf);
117		if (ctx->errcode)
118			ret |= BLOCK_ERROR | BLOCK_ABORT;
119	}
120	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
121	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
122	    !(ret & BLOCK_ABORT))
123		ret |= (*ctx->func)(ctx->fs, ind_block,
124				    BLOCK_COUNT_IND, ref_block,
125				    ref_offset, ctx->priv_data);
126	return ret;
127}
128
129static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
130			      int ref_offset, struct block_context *ctx)
131{
132	int	ret = 0, changed = 0;
133	int	i, flags, limit, offset;
134	blk_t	*block_nr;
135
136	limit = ctx->fs->blocksize >> 2;
137	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
138			    BLOCK_FLAG_DATA_ONLY)))
139		ret = (*ctx->func)(ctx->fs, dind_block,
140				   BLOCK_COUNT_DIND, ref_block,
141				   ref_offset, ctx->priv_data);
142	if (!*dind_block || (ret & BLOCK_ABORT)) {
143		ctx->bcount += limit*limit;
144		return ret;
145	}
146	if (*dind_block >= ctx->fs->super->s_blocks_count ||
147	    *dind_block < ctx->fs->super->s_first_data_block) {
148		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
149		ret |= BLOCK_ERROR;
150		return ret;
151	}
152	if (ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) {
153		ctx->errcode = 0;
154		memset(ctx->dind_buf, 0, ctx->fs->blocksize);
155	} else
156		ctx->errcode = io_channel_read_blk(ctx->fs->io, *dind_block,
157						   1, ctx->dind_buf);
158	if (ctx->errcode) {
159		ret |= BLOCK_ERROR;
160		return ret;
161	}
162	if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
163			      EXT2_FLAG_SWAP_BYTES_READ)) {
164		block_nr = (blk_t *) ctx->dind_buf;
165		for (i = 0; i < limit; i++, block_nr++)
166			*block_nr = ext2fs_swab32(*block_nr);
167	}
168	block_nr = (blk_t *) ctx->dind_buf;
169	offset = 0;
170	if (ctx->flags & BLOCK_FLAG_APPEND) {
171		for (i = 0; i < limit; i++, block_nr++) {
172			flags = block_iterate_ind(block_nr,
173						  *dind_block, offset,
174						  ctx);
175			changed |= flags;
176			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
177				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
178				break;
179			}
180			offset += sizeof(blk_t);
181		}
182	} else {
183		for (i = 0; i < limit; i++, block_nr++) {
184			if (*block_nr == 0) {
185				ctx->bcount += limit;
186				continue;
187			}
188			flags = block_iterate_ind(block_nr,
189						  *dind_block, offset,
190						  ctx);
191			changed |= flags;
192			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
193				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
194				break;
195			}
196			offset += sizeof(blk_t);
197		}
198	}
199	if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
200	    (changed & BLOCK_CHANGED)) {
201		if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
202				      EXT2_FLAG_SWAP_BYTES_WRITE)) {
203			block_nr = (blk_t *) ctx->dind_buf;
204			for (i = 0; i < limit; i++, block_nr++)
205				*block_nr = ext2fs_swab32(*block_nr);
206		}
207		ctx->errcode = io_channel_write_blk(ctx->fs->io, *dind_block,
208						    1, ctx->dind_buf);
209		if (ctx->errcode)
210			ret |= BLOCK_ERROR | BLOCK_ABORT;
211	}
212	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
213	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
214	    !(ret & BLOCK_ABORT))
215		ret |= (*ctx->func)(ctx->fs, dind_block,
216				    BLOCK_COUNT_DIND, ref_block,
217				    ref_offset, ctx->priv_data);
218	return ret;
219}
220
221static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
222			      int ref_offset, struct block_context *ctx)
223{
224	int	ret = 0, changed = 0;
225	int	i, flags, limit, offset;
226	blk_t	*block_nr;
227
228	limit = ctx->fs->blocksize >> 2;
229	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
230			    BLOCK_FLAG_DATA_ONLY)))
231		ret = (*ctx->func)(ctx->fs, tind_block,
232				   BLOCK_COUNT_TIND, ref_block,
233				   ref_offset, ctx->priv_data);
234	if (!*tind_block || (ret & BLOCK_ABORT)) {
235		ctx->bcount += limit*limit*limit;
236		return ret;
237	}
238	if (*tind_block >= ctx->fs->super->s_blocks_count ||
239	    *tind_block < ctx->fs->super->s_first_data_block) {
240		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
241		ret |= BLOCK_ERROR;
242		return ret;
243	}
244	if (ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) {
245		ctx->errcode = 0;
246		memset(ctx->tind_buf, 0, ctx->fs->blocksize);
247	} else
248		ctx->errcode = io_channel_read_blk(ctx->fs->io, *tind_block,
249						   1, ctx->tind_buf);
250	if (ctx->errcode) {
251		ret |= BLOCK_ERROR;
252		return ret;
253	}
254	if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
255			      EXT2_FLAG_SWAP_BYTES_READ)) {
256		block_nr = (blk_t *) ctx->tind_buf;
257		for (i = 0; i < limit; i++, block_nr++)
258			*block_nr = ext2fs_swab32(*block_nr);
259	}
260	block_nr = (blk_t *) ctx->tind_buf;
261	offset = 0;
262	if (ctx->flags & BLOCK_FLAG_APPEND) {
263		for (i = 0; i < limit; i++, block_nr++) {
264			flags = block_iterate_dind(block_nr,
265						   *tind_block,
266						   offset, ctx);
267			changed |= flags;
268			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
269				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
270				break;
271			}
272			offset += sizeof(blk_t);
273		}
274	} else {
275		for (i = 0; i < limit; i++, block_nr++) {
276			if (*block_nr == 0) {
277				ctx->bcount += limit*limit;
278				continue;
279			}
280			flags = block_iterate_dind(block_nr,
281						   *tind_block,
282						   offset, ctx);
283			changed |= flags;
284			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
285				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
286				break;
287			}
288			offset += sizeof(blk_t);
289		}
290	}
291	if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
292	    (changed & BLOCK_CHANGED)) {
293		if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
294				      EXT2_FLAG_SWAP_BYTES_WRITE)) {
295			block_nr = (blk_t *) ctx->tind_buf;
296			for (i = 0; i < limit; i++, block_nr++)
297				*block_nr = ext2fs_swab32(*block_nr);
298		}
299		ctx->errcode = io_channel_write_blk(ctx->fs->io, *tind_block,
300						    1, ctx->tind_buf);
301		if (ctx->errcode)
302			ret |= BLOCK_ERROR | BLOCK_ABORT;
303	}
304	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
305	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
306	    !(ret & BLOCK_ABORT))
307		ret |= (*ctx->func)(ctx->fs, tind_block,
308				    BLOCK_COUNT_TIND, ref_block,
309				    ref_offset, ctx->priv_data);
310
311	return ret;
312}
313
314errcode_t ext2fs_block_iterate2(ext2_filsys fs,
315				ext2_ino_t ino,
316				int	flags,
317				char *block_buf,
318				int (*func)(ext2_filsys fs,
319					    blk_t	*blocknr,
320					    e2_blkcnt_t	blockcnt,
321					    blk_t	ref_blk,
322					    int		ref_offset,
323					    void	*priv_data),
324				void *priv_data)
325{
326	int	i;
327	int	got_inode = 0;
328	int	ret = 0;
329	blk_t	blocks[EXT2_N_BLOCKS];	/* directory data blocks */
330	struct ext2_inode inode;
331	errcode_t	retval;
332	struct block_context ctx;
333	int	limit;
334
335	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
336
337	/*
338	 * Check to see if we need to limit large files
339	 */
340	if (flags & BLOCK_FLAG_NO_LARGE) {
341		ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
342		if (ctx.errcode)
343			return ctx.errcode;
344		got_inode = 1;
345		if (!LINUX_S_ISDIR(inode.i_mode) &&
346		    (inode.i_size_high != 0))
347			return EXT2_ET_FILE_TOO_BIG;
348	}
349
350	retval = ext2fs_get_blocks(fs, ino, blocks);
351	if (retval)
352		return retval;
353
354	limit = fs->blocksize >> 2;
355
356	ctx.fs = fs;
357	ctx.func = func;
358	ctx.priv_data = priv_data;
359	ctx.flags = flags;
360	ctx.bcount = 0;
361	if (block_buf) {
362		ctx.ind_buf = block_buf;
363	} else {
364		retval = ext2fs_get_mem(fs->blocksize * 3,
365					(void **) &ctx.ind_buf);
366		if (retval)
367			return retval;
368	}
369	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
370	ctx.tind_buf = ctx.dind_buf + fs->blocksize;
371
372	/*
373	 * Iterate over the HURD translator block (if present)
374	 */
375	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
376	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
377		ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
378		if (ctx.errcode)
379			goto abort_exit;
380		got_inode = 1;
381		if (inode.osd1.hurd1.h_i_translator) {
382			ret |= (*ctx.func)(fs,
383					   &inode.osd1.hurd1.h_i_translator,
384					   BLOCK_COUNT_TRANSLATOR,
385					   0, 0, priv_data);
386			if (ret & BLOCK_ABORT)
387				goto abort_exit;
388		}
389	}
390
391	/*
392	 * Iterate over normal data blocks
393	 */
394	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
395		if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
396			ret |= (*ctx.func)(fs, &blocks[i],
397					    ctx.bcount, 0, i, priv_data);
398			if (ret & BLOCK_ABORT)
399				goto abort_exit;
400		}
401	}
402	if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
403		ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
404					 0, EXT2_IND_BLOCK, &ctx);
405		if (ret & BLOCK_ABORT)
406			goto abort_exit;
407	} else
408		ctx.bcount += limit;
409	if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
410		ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
411					  0, EXT2_DIND_BLOCK, &ctx);
412		if (ret & BLOCK_ABORT)
413			goto abort_exit;
414	} else
415		ctx.bcount += limit * limit;
416	if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
417		ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
418					  0, EXT2_TIND_BLOCK, &ctx);
419		if (ret & BLOCK_ABORT)
420			goto abort_exit;
421	}
422
423abort_exit:
424	if (ret & BLOCK_CHANGED) {
425		if (!got_inode) {
426			retval = ext2fs_read_inode(fs, ino, &inode);
427			if (retval)
428				return retval;
429		}
430		for (i=0; i < EXT2_N_BLOCKS; i++)
431			inode.i_block[i] = blocks[i];
432		retval = ext2fs_write_inode(fs, ino, &inode);
433		if (retval)
434			return retval;
435	}
436
437	if (!block_buf)
438		ext2fs_free_mem((void **) &ctx.ind_buf);
439
440	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
441}
442
443/*
444 * Emulate the old ext2fs_block_iterate function!
445 */
446
/*
 * Adapter state used to funnel old-style ext2fs_block_iterate()
 * callbacks through the new ext2fs_block_iterate2() interface.
 */
struct xlate {
	/* old-style callback: takes an int block count, no ref info */
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;	/* the caller's original priv_data */
};
454
455#ifdef __TURBOC__
456 #pragma argsused
457#endif
458static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
459		      blk_t ref_block, int ref_offset, void *priv_data)
460{
461	struct xlate *xl = (struct xlate *) priv_data;
462
463	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
464}
465
466errcode_t ext2fs_block_iterate(ext2_filsys fs,
467			       ext2_ino_t ino,
468			       int	flags,
469			       char *block_buf,
470			       int (*func)(ext2_filsys fs,
471					   blk_t	*blocknr,
472					   int	blockcnt,
473					   void	*priv_data),
474			       void *priv_data)
475{
476	struct xlate xl;
477
478	xl.real_private = priv_data;
479	xl.func = func;
480
481	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
482				     block_buf, xlate_func, &xl);
483}
484
485