/* block.c --- revision d3a8fc5ae68477118e32813230518bf4ccc73bf9 */
1/* 2 * block.c --- iterate over all blocks in an inode 3 * 4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o. 5 * 6 * %Begin-Header% 7 * This file may be redistributed under the terms of the GNU Public 8 * License. 9 * %End-Header% 10 */ 11 12#include <stdio.h> 13#include <string.h> 14#if HAVE_UNISTD_H 15#include <unistd.h> 16#endif 17 18#include "ext2_fs.h" 19#include "ext2fs.h" 20 21struct block_context { 22 ext2_filsys fs; 23 int (*func)(ext2_filsys fs, 24 blk_t *blocknr, 25 e2_blkcnt_t bcount, 26 blk_t ref_blk, 27 int ref_offset, 28 void *priv_data); 29 e2_blkcnt_t bcount; 30 int bsize; 31 int flags; 32 errcode_t errcode; 33 char *ind_buf; 34 char *dind_buf; 35 char *tind_buf; 36 void *priv_data; 37}; 38 39#define check_for_ro_violation_return(ctx, ret) \ 40 do { \ 41 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \ 42 ((ret) & BLOCK_CHANGED)) { \ 43 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \ 44 ret |= BLOCK_ABORT | BLOCK_ERROR; \ 45 return ret; \ 46 } \ 47 } while (0) 48 49#define check_for_ro_violation_goto(ctx, ret, label) \ 50 do { \ 51 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \ 52 ((ret) & BLOCK_CHANGED)) { \ 53 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \ 54 ret |= BLOCK_ABORT | BLOCK_ERROR; \ 55 goto label; \ 56 } \ 57 } while (0) 58 59static int block_iterate_ind(blk_t *ind_block, blk_t ref_block, 60 int ref_offset, struct block_context *ctx) 61{ 62 int ret = 0, changed = 0; 63 int i, flags, limit, offset; 64 blk_t *block_nr; 65 66 limit = ctx->fs->blocksize >> 2; 67 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) && 68 !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) 69 ret = (*ctx->func)(ctx->fs, ind_block, 70 BLOCK_COUNT_IND, ref_block, 71 ref_offset, ctx->priv_data); 72 check_for_ro_violation_return(ctx, ret); 73 if (!*ind_block || (ret & BLOCK_ABORT)) { 74 ctx->bcount += limit; 75 return ret; 76 } 77 if (*ind_block >= ctx->fs->super->s_blocks_count || 78 *ind_block < ctx->fs->super->s_first_data_block) { 79 ctx->errcode = EXT2_ET_BAD_IND_BLOCK; 80 ret 
|= BLOCK_ERROR; 81 return ret; 82 } 83 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block, 84 ctx->ind_buf); 85 if (ctx->errcode) { 86 ret |= BLOCK_ERROR; 87 return ret; 88 } 89 90 block_nr = (blk_t *) ctx->ind_buf; 91 offset = 0; 92 if (ctx->flags & BLOCK_FLAG_APPEND) { 93 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) { 94 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount, 95 *ind_block, offset, 96 ctx->priv_data); 97 changed |= flags; 98 if (flags & BLOCK_ABORT) { 99 ret |= BLOCK_ABORT; 100 break; 101 } 102 offset += sizeof(blk_t); 103 } 104 } else { 105 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) { 106 if (*block_nr == 0) 107 continue; 108 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount, 109 *ind_block, offset, 110 ctx->priv_data); 111 changed |= flags; 112 if (flags & BLOCK_ABORT) { 113 ret |= BLOCK_ABORT; 114 break; 115 } 116 offset += sizeof(blk_t); 117 } 118 } 119 check_for_ro_violation_return(ctx, changed); 120 if (changed & BLOCK_CHANGED) { 121 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block, 122 ctx->ind_buf); 123 if (ctx->errcode) 124 ret |= BLOCK_ERROR | BLOCK_ABORT; 125 } 126 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) && 127 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) && 128 !(ret & BLOCK_ABORT)) 129 ret |= (*ctx->func)(ctx->fs, ind_block, 130 BLOCK_COUNT_IND, ref_block, 131 ref_offset, ctx->priv_data); 132 check_for_ro_violation_return(ctx, ret); 133 return ret; 134} 135 136static int block_iterate_dind(blk_t *dind_block, blk_t ref_block, 137 int ref_offset, struct block_context *ctx) 138{ 139 int ret = 0, changed = 0; 140 int i, flags, limit, offset; 141 blk_t *block_nr; 142 143 limit = ctx->fs->blocksize >> 2; 144 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE | 145 BLOCK_FLAG_DATA_ONLY))) 146 ret = (*ctx->func)(ctx->fs, dind_block, 147 BLOCK_COUNT_DIND, ref_block, 148 ref_offset, ctx->priv_data); 149 check_for_ro_violation_return(ctx, ret); 150 if (!*dind_block || (ret & BLOCK_ABORT)) { 151 ctx->bcount += 
limit*limit; 152 return ret; 153 } 154 if (*dind_block >= ctx->fs->super->s_blocks_count || 155 *dind_block < ctx->fs->super->s_first_data_block) { 156 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK; 157 ret |= BLOCK_ERROR; 158 return ret; 159 } 160 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block, 161 ctx->dind_buf); 162 if (ctx->errcode) { 163 ret |= BLOCK_ERROR; 164 return ret; 165 } 166 167 block_nr = (blk_t *) ctx->dind_buf; 168 offset = 0; 169 if (ctx->flags & BLOCK_FLAG_APPEND) { 170 for (i = 0; i < limit; i++, block_nr++) { 171 flags = block_iterate_ind(block_nr, 172 *dind_block, offset, 173 ctx); 174 changed |= flags; 175 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) { 176 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR); 177 break; 178 } 179 offset += sizeof(blk_t); 180 } 181 } else { 182 for (i = 0; i < limit; i++, block_nr++) { 183 if (*block_nr == 0) { 184 ctx->bcount += limit; 185 continue; 186 } 187 flags = block_iterate_ind(block_nr, 188 *dind_block, offset, 189 ctx); 190 changed |= flags; 191 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) { 192 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR); 193 break; 194 } 195 offset += sizeof(blk_t); 196 } 197 } 198 check_for_ro_violation_return(ctx, changed); 199 if (changed & BLOCK_CHANGED) { 200 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block, 201 ctx->dind_buf); 202 if (ctx->errcode) 203 ret |= BLOCK_ERROR | BLOCK_ABORT; 204 } 205 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) && 206 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) && 207 !(ret & BLOCK_ABORT)) 208 ret |= (*ctx->func)(ctx->fs, dind_block, 209 BLOCK_COUNT_DIND, ref_block, 210 ref_offset, ctx->priv_data); 211 check_for_ro_violation_return(ctx, ret); 212 return ret; 213} 214 215static int block_iterate_tind(blk_t *tind_block, blk_t ref_block, 216 int ref_offset, struct block_context *ctx) 217{ 218 int ret = 0, changed = 0; 219 int i, flags, limit, offset; 220 blk_t *block_nr; 221 222 limit = ctx->fs->blocksize >> 2; 223 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE | 
224 BLOCK_FLAG_DATA_ONLY))) 225 ret = (*ctx->func)(ctx->fs, tind_block, 226 BLOCK_COUNT_TIND, ref_block, 227 ref_offset, ctx->priv_data); 228 check_for_ro_violation_return(ctx, ret); 229 if (!*tind_block || (ret & BLOCK_ABORT)) { 230 ctx->bcount += limit*limit*limit; 231 return ret; 232 } 233 if (*tind_block >= ctx->fs->super->s_blocks_count || 234 *tind_block < ctx->fs->super->s_first_data_block) { 235 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK; 236 ret |= BLOCK_ERROR; 237 return ret; 238 } 239 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block, 240 ctx->tind_buf); 241 if (ctx->errcode) { 242 ret |= BLOCK_ERROR; 243 return ret; 244 } 245 246 block_nr = (blk_t *) ctx->tind_buf; 247 offset = 0; 248 if (ctx->flags & BLOCK_FLAG_APPEND) { 249 for (i = 0; i < limit; i++, block_nr++) { 250 flags = block_iterate_dind(block_nr, 251 *tind_block, 252 offset, ctx); 253 changed |= flags; 254 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) { 255 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR); 256 break; 257 } 258 offset += sizeof(blk_t); 259 } 260 } else { 261 for (i = 0; i < limit; i++, block_nr++) { 262 if (*block_nr == 0) { 263 ctx->bcount += limit*limit; 264 continue; 265 } 266 flags = block_iterate_dind(block_nr, 267 *tind_block, 268 offset, ctx); 269 changed |= flags; 270 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) { 271 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR); 272 break; 273 } 274 offset += sizeof(blk_t); 275 } 276 } 277 check_for_ro_violation_return(ctx, changed); 278 if (changed & BLOCK_CHANGED) { 279 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block, 280 ctx->tind_buf); 281 if (ctx->errcode) 282 ret |= BLOCK_ERROR | BLOCK_ABORT; 283 } 284 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) && 285 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) && 286 !(ret & BLOCK_ABORT)) 287 ret |= (*ctx->func)(ctx->fs, tind_block, 288 BLOCK_COUNT_TIND, ref_block, 289 ref_offset, ctx->priv_data); 290 check_for_ro_violation_return(ctx, ret); 291 return ret; 292} 293 294errcode_t 
ext2fs_block_iterate2(ext2_filsys fs, 295 ext2_ino_t ino, 296 int flags, 297 char *block_buf, 298 int (*func)(ext2_filsys fs, 299 blk_t *blocknr, 300 e2_blkcnt_t blockcnt, 301 blk_t ref_blk, 302 int ref_offset, 303 void *priv_data), 304 void *priv_data) 305{ 306 int i; 307 int r, ret = 0; 308 struct ext2_inode inode; 309 errcode_t retval; 310 struct block_context ctx; 311 int limit; 312 313 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS); 314 315 ctx.errcode = ext2fs_read_inode(fs, ino, &inode); 316 if (ctx.errcode) 317 return ctx.errcode; 318 319 /* 320 * Check to see if we need to limit large files 321 */ 322 if (flags & BLOCK_FLAG_NO_LARGE) { 323 if (!LINUX_S_ISDIR(inode.i_mode) && 324 (inode.i_size_high != 0)) 325 return EXT2_ET_FILE_TOO_BIG; 326 } 327 328 limit = fs->blocksize >> 2; 329 330 ctx.fs = fs; 331 ctx.func = func; 332 ctx.priv_data = priv_data; 333 ctx.flags = flags; 334 ctx.bcount = 0; 335 if (block_buf) { 336 ctx.ind_buf = block_buf; 337 } else { 338 retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf); 339 if (retval) 340 return retval; 341 } 342 ctx.dind_buf = ctx.ind_buf + fs->blocksize; 343 ctx.tind_buf = ctx.dind_buf + fs->blocksize; 344 345 /* 346 * Iterate over the HURD translator block (if present) 347 */ 348 if ((fs->super->s_creator_os == EXT2_OS_HURD) && 349 !(flags & BLOCK_FLAG_DATA_ONLY)) { 350 if (inode.osd1.hurd1.h_i_translator) { 351 ret |= (*ctx.func)(fs, 352 &inode.osd1.hurd1.h_i_translator, 353 BLOCK_COUNT_TRANSLATOR, 354 0, 0, priv_data); 355 if (ret & BLOCK_ABORT) 356 goto abort_exit; 357 check_for_ro_violation_goto(&ctx, ret, abort_exit); 358 } 359 } 360 361 if (inode.i_flags & EXT4_EXTENTS_FL) { 362 ext2_extent_handle_t handle; 363 struct ext2fs_extent extent; 364 e2_blkcnt_t blockcnt = 0; 365 blk_t blk, new_blk; 366 int op = EXT2_EXTENT_ROOT; 367 unsigned int j; 368 369 ctx.errcode = ext2fs_extent_open(fs, ino, &handle); 370 if (ctx.errcode) 371 goto abort_exit; 372 373 while (1) { 374 ctx.errcode = 
ext2fs_extent_get(handle, op, &extent); 375 if (ctx.errcode) { 376 if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT) 377 break; 378 ctx.errcode = 0; 379 if (!(flags & BLOCK_FLAG_APPEND)) 380 break; 381 blk = 0; 382 r = (*ctx.func)(fs, &blk, blockcnt, 383 0, 0, priv_data); 384 ret |= r; 385 check_for_ro_violation_goto(&ctx, ret, 386 extent_errout); 387 if (r & BLOCK_CHANGED) { 388 ctx.errcode = 389 ext2fs_extent_set_bmap(handle, 390 (blk64_t) blockcnt++, 391 (blk64_t) blk, 0); 392 if (ctx.errcode || (ret & BLOCK_ABORT)) 393 break; 394 continue; 395 } 396 break; 397 } 398 399 op = EXT2_EXTENT_NEXT; 400 blk = extent.e_pblk; 401 if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) { 402 if (ctx.flags & BLOCK_FLAG_DATA_ONLY) 403 continue; 404 if ((!(extent.e_flags & 405 EXT2_EXTENT_FLAGS_SECOND_VISIT) && 406 !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) || 407 ((extent.e_flags & 408 EXT2_EXTENT_FLAGS_SECOND_VISIT) && 409 (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) { 410 ret |= (*ctx.func)(fs, &blk, 411 -1, 0, 0, priv_data); 412 if (ret & BLOCK_CHANGED) { 413 extent.e_pblk = blk; 414 ctx.errcode = 415 ext2fs_extent_replace(handle, 0, &extent); 416 if (ctx.errcode) 417 break; 418 } 419 } 420 continue; 421 } 422 for (blockcnt = extent.e_lblk, j = 0; 423 j < extent.e_len; 424 blk++, blockcnt++, j++) { 425 new_blk = blk; 426 r = (*ctx.func)(fs, &new_blk, blockcnt, 427 0, 0, priv_data); 428 ret |= r; 429 check_for_ro_violation_goto(&ctx, ret, 430 extent_errout); 431 if (r & BLOCK_CHANGED) { 432 ctx.errcode = 433 ext2fs_extent_set_bmap(handle, 434 (blk64_t) blockcnt, 435 (blk64_t) new_blk, 0); 436 if (ctx.errcode) 437 goto extent_errout; 438 } 439 if (ret & BLOCK_ABORT) 440 break; 441 } 442 } 443 444 extent_errout: 445 ext2fs_extent_free(handle); 446 ret |= BLOCK_ERROR | BLOCK_ABORT; 447 goto errout; 448 } 449 450 /* 451 * Iterate over normal data blocks 452 */ 453 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) { 454 if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) { 455 ret |= 
(*ctx.func)(fs, &inode.i_block[i], 456 ctx.bcount, 0, i, priv_data); 457 if (ret & BLOCK_ABORT) 458 goto abort_exit; 459 } 460 } 461 check_for_ro_violation_goto(&ctx, ret, abort_exit); 462 if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) { 463 ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK], 464 0, EXT2_IND_BLOCK, &ctx); 465 if (ret & BLOCK_ABORT) 466 goto abort_exit; 467 } else 468 ctx.bcount += limit; 469 if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) { 470 ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK], 471 0, EXT2_DIND_BLOCK, &ctx); 472 if (ret & BLOCK_ABORT) 473 goto abort_exit; 474 } else 475 ctx.bcount += limit * limit; 476 if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) { 477 ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK], 478 0, EXT2_TIND_BLOCK, &ctx); 479 if (ret & BLOCK_ABORT) 480 goto abort_exit; 481 } 482 483abort_exit: 484 if (ret & BLOCK_CHANGED) { 485 retval = ext2fs_write_inode(fs, ino, &inode); 486 if (retval) 487 return retval; 488 } 489errout: 490 if (!block_buf) 491 ext2fs_free_mem(&ctx.ind_buf); 492 493 return (ret & BLOCK_ERROR) ? ctx.errcode : 0; 494} 495 496/* 497 * Emulate the old ext2fs_block_iterate function! 
498 */ 499 500struct xlate { 501 int (*func)(ext2_filsys fs, 502 blk_t *blocknr, 503 int bcount, 504 void *priv_data); 505 void *real_private; 506}; 507 508#ifdef __TURBOC__ 509 #pragma argsused 510#endif 511static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt, 512 blk_t ref_block EXT2FS_ATTR((unused)), 513 int ref_offset EXT2FS_ATTR((unused)), 514 void *priv_data) 515{ 516 struct xlate *xl = (struct xlate *) priv_data; 517 518 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private); 519} 520 521errcode_t ext2fs_block_iterate(ext2_filsys fs, 522 ext2_ino_t ino, 523 int flags, 524 char *block_buf, 525 int (*func)(ext2_filsys fs, 526 blk_t *blocknr, 527 int blockcnt, 528 void *priv_data), 529 void *priv_data) 530{ 531 struct xlate xl; 532 533 xl.real_private = priv_data; 534 xl.func = func; 535 536 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags, 537 block_buf, xlate_func, &xl); 538} 539 540