/* block.c revision 07f1a070ff45c8381c3ddf8552c726525104e1ee */
/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Public
 * License.
 * %End-Header%
 */

#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "ext2_fs.h"
#include "ext2fs.h"

/*
 * Per-call state shared by ext2fs_block_iterate2() and the recursive
 * indirect-block walkers below.
 */
struct block_context {
	ext2_filsys	fs;
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);	/* caller's callback */
	e2_blkcnt_t	bcount;		/* running logical block number */
	int		bsize;
	int		flags;		/* BLOCK_FLAG_* passed by the caller */
	errcode_t	errcode;	/* error code reported to the caller */
	char	*ind_buf;		/* scratch: indirect block */
	char	*dind_buf;		/* scratch: double-indirect block */
	char	*tind_buf;		/* scratch: triple-indirect block */
	void	*priv_data;		/* opaque cookie for the callback */
};

/*
 * If the iteration is read-only but the callback claims to have changed
 * a block, record EXT2_ET_RO_BLOCK_ITERATE and bail out of the caller.
 */
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)

/* Same check, but jump to a cleanup label instead of returning. */
#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)

/*
 * Walk one indirect block: optionally report the indirect block itself
 * to the callback (before its contents normally, after them in
 * depth-first mode), invoke the callback on each block-number entry,
 * and write the indirect block back if any entry was changed.
 */
static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;	/* block numbers per block */
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
		ret = (*ctx->func)(ctx->fs, ind_block,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		/* Sparse (or aborted): account for the skipped range */
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ctx->fs->super->s_blocks_count ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		/* Append mode visits every slot, even zero (sparse) ones */
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				continue;
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	/* Depth-first mode reports the indirect block after its contents */
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, ind_block,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

/*
 * Walk one double-indirect block: same pattern as block_iterate_ind(),
 * except each entry is itself an indirect block handed down to
 * block_iterate_ind().
 */
static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, dind_block,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		/* Sparse: a dind block covers limit*limit logical blocks */
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ctx->fs->super->s_blocks_count ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				/* Skip a sparse indirect block's range */
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, dind_block,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

/*
 * Walk one triple-indirect block: each entry is a double-indirect
 * block handed down to block_iterate_dind().
 */
static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, tind_block,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		/* Sparse: a tind block covers limit^3 logical blocks */
		ctx->bcount += limit*limit*limit;
		return ret;
	}
	if (*tind_block >= ctx->fs->super->s_blocks_count ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				/* Skip a sparse dind block's range */
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, tind_block,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

/*
 * Iterate over all blocks of the inode, calling func() on each one:
 * data blocks, and (unless BLOCK_FLAG_DATA_ONLY) metadata blocks too.
 * Handles both extent-mapped and indirect-mapped inodes.
 */
errcode_t
ext2fs_block_iterate2(ext2_filsys fs,
		      ext2_ino_t ino,
		      int	flags,
		      char	*block_buf,
		      int (*func)(ext2_filsys	fs,
				  blk_t	*blocknr,
				  e2_blkcnt_t	blockcnt,
				  blk_t	ref_blk,
				  int	ref_offset,
				  void	*priv_data),
		      void	*priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;	/* block numbers per block */

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		/* Caller supplied scratch space (3 blocks' worth) */
		ctx.ind_buf = block_buf;
	} else {
		/* One buffer each for the ind, dind and tind levels */
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			ret |= (*ctx.func)(fs,
					   &inode.osd1.hurd1.h_i_translator,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	if (inode.i_flags & EXT4_EXTENTS_FL) {
		/* Extent-mapped inode: walk the extent tree instead */
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent;
		e2_blkcnt_t		blockcnt = 0;
		blk_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open(fs, ino, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				/*
				 * Past the last extent; in append mode let
				 * the callback keep adding blocks at the
				 * end of the file.
				 */
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					continue;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				/* Interior (index) node of the extent tree */
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				/*
				 * Report interior nodes on the first visit
				 * (pre-order) normally, or on the second
				 * visit (post-order) in depth-first mode.
				 */
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
			/* Leaf: hand each mapped block to the callback */
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       (blk64_t) new_blk,
						       uninit);
					if (ctx.errcode)
						goto extent_errout;
				}
				if (ret & BLOCK_ABORT)
					break;
			}
		}

	extent_errout:
		ext2fs_extent_free(handle);
		/*
		 * BLOCK_ERROR makes the return below report ctx.errcode,
		 * which is 0 when the loop terminated normally.
		 */
		ret |= BLOCK_ERROR | BLOCK_ABORT;
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			ret |= (*ctx.func)(fs, &inode.i_block[i],
					   ctx.bcount, 0, i, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;	/* account for the sparse range */
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	/* Write the inode back if the callback changed any i_block entry */
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval)
			return retval;
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}

/*
 * Emulate the old ext2fs_block_iterate function!
503 */ 504 505struct xlate { 506 int (*func)(ext2_filsys fs, 507 blk_t *blocknr, 508 int bcount, 509 void *priv_data); 510 void *real_private; 511}; 512 513#ifdef __TURBOC__ 514 #pragma argsused 515#endif 516static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt, 517 blk_t ref_block EXT2FS_ATTR((unused)), 518 int ref_offset EXT2FS_ATTR((unused)), 519 void *priv_data) 520{ 521 struct xlate *xl = (struct xlate *) priv_data; 522 523 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private); 524} 525 526errcode_t ext2fs_block_iterate(ext2_filsys fs, 527 ext2_ino_t ino, 528 int flags, 529 char *block_buf, 530 int (*func)(ext2_filsys fs, 531 blk_t *blocknr, 532 int blockcnt, 533 void *priv_data), 534 void *priv_data) 535{ 536 struct xlate xl; 537 538 xl.real_private = priv_data; 539 xl.func = func; 540 541 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags, 542 block_buf, xlate_func, &xl); 543} 544 545