extents.c revision e861304b8ed83fe43e36d46794d72641c82d4636
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
/*
 * Allocation for a metadata block.
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
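/*
 * For illustration (assuming the common 4KiB block size and the 12-byte
 * header and entry sizes defined in ext4_extents.h): a full index or
 * leaf block holds (4096 - 12) / 12 = 340 entries, while the 60-byte
 * i_data root holds (60 - 12) / 12 = 4 entries.
 */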
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
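/*
 * For illustration (again assuming 4KiB blocks, so idxs == 340): while
 * delayed-allocation blocks stay contiguous, this charges one extra
 * metadata block every 340 leaf blocks, another every 340^2 blocks,
 * and so on; the first block of a discontiguous run falls through to
 * the worst case of ext_depth(inode) + 1 above.
 */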
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
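/*
 * A note on both binary searches above: they start from the second
 * entry and converge so that p_idx/p_ext ends up on the last entry
 * whose starting block is <= the target.  E.g. (illustrative):
 * searching for block 25 in a leaf with extents starting at 0, 10 and
 * 30 leaves p_ext on the extent starting at 10.
 */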
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
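/*
 * Usage note for ext4_ext_find_extent(), as seen in the callers in
 * this file: on success the caller owns the returned path and must
 * release it with ext4_ext_drop_refs() and, if it did not pass in its
 * own array, kfree(); on error any buffer references have already been
 * dropped and a path allocated by this function has been freed.
 */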
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will be in a consistent state.  The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the blocks
	 * in the error case.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext4_ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					ext4_idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					unsigned int flags,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests growing the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags,
					    path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left of *logical
 * and returns it at @logical together with its physical address at @phys.
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
/*
 * search the closest allocated block to the right of *logical
 * and returns it at @logical together with its physical address at @phys.
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys.
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns the allocated block in the subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers the block number from an index entry as an
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
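/*
 * Illustration for ext4_can_extents_be_merged(): two initialized
 * extents A = {ee_block 0, len 100, pblock 500} and B = {ee_block 100,
 * len 50, pblock 600} are mergeable, since 0 + 100 == 100,
 * 500 + 100 == 600 and 100 + 50 <= EXT_INIT_MAX_LEN; marking only one
 * of them uninitialized would fail the first check.
 */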
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function tries to merge the @ex extent to the neighbours in the tree.
 * Returns 1 if @ex was merged with the extent to its right; 0 otherwise
 * (including when @ex was instead absorbed into the extent on its left).
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;
	int ret = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		ret = ext4_ext_try_to_merge_right(inode, path, ex);

	return ret;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
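/*
 * Example for ext4_ext_check_overlap() (illustrative): if newext covers
 * logical blocks 10..19 (b1 = 10, len1 = 10) and the next allocated
 * block is b2 = 15, the length is trimmed to b2 - b1 = 5, newext
 * becomes 10..14 and the function returns 1.
 */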
/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're going to add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
			       void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
}

/*
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	struct ext4_sb_info *sbi;
	int ret = 0;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	sbi = EXT4_SB(inode->i_sb);

	/* has cache valid data? */
	if (cex->ec_len == 0)
		goto errout;

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = 1;
	}
errout:
	if (!ret)
		sbi->extent_cache_misses++;
	else
		sbi->extent_cache_hits++;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}
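/*
 * Note on the extent cache above: the inode carries a single cached
 * extent or gap in i_cached_extent, protected by borrowing
 * i_block_reservation_lock.  A gap is stored with ec_start == 0, so a
 * cache hit by itself does not tell the caller whether the range is
 * actually mapped.
 */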
2091 */
2092static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2093			struct ext4_ext_path *path)
2094{
2095	int err;
2096	ext4_fsblk_t leaf;
2097
2098	/* free index block */
2099	path--;
2100	leaf = ext4_idx_pblock(path->p_idx);
2101	if (unlikely(path->p_hdr->eh_entries == 0)) {
2102		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2103		return -EIO;
2104	}
2105	err = ext4_ext_get_access(handle, inode, path);
2106	if (err)
2107		return err;
2108	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2109	err = ext4_ext_dirty(handle, inode, path);
2110	if (err)
2111		return err;
2112	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2113	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2114			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2115	return err;
2116}
2117
2118/*
2119 * ext4_ext_calc_credits_for_single_extent:
2120 * This routine returns the max. credits needed to insert an extent
2121 * into the extent tree.
2122 * When the actual path is passed, the caller must calculate the
2123 * credits under i_data_sem.
2124 */
2125int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2126						struct ext4_ext_path *path)
2127{
2128	if (path) {
2129		int depth = ext_depth(inode);
2130		int ret = 0;
2131
2132		/* probably there is space in the leaf? */
2133		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2134				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2135
2136			/*
2137			 * There is some space in the leaf, so there is no
2138			 * need to account for a leaf block credit.
2139			 *
2140			 * Bitmaps, block group descriptor blocks and
2141			 * other metadata blocks still need to be
2142			 * accounted.
2143			 */
2144			/* 1 bitmap, 1 block group descriptor */
2145			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2146			return ret;
2147		}
2148	}
2149
2150	return ext4_chunk_trans_blocks(inode, nrblocks);
2151}
2152
2153/*
2154 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2155 *
2156 * If nrblocks fit in a single extent (chunk flag is 1), then in the
2157 * worst case each tree level's index/leaf needs to be changed, and if
2158 * the tree splits due to inserting a new extent, the old index/leaf
2159 * blocks need to be updated as well.
2160 *
2161 * If the nrblocks are discontiguous, they could cause
2162 * the whole tree to split more than once, but this is really rare.
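 *
 * A worked example (illustrative numbers): for a tree of depth 2, a
 * single contiguous chunk may touch each index/leaf level twice (the
 * existing path plus a possible split), i.e. 2 * 2 = 4 blocks, while
 * discontiguous nrblocks are charged three per level, i.e. 2 * 3 = 6.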
2163 */ 2164int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2165{ 2166 int index; 2167 int depth = ext_depth(inode); 2168 2169 if (chunk) 2170 index = depth * 2; 2171 else 2172 index = depth * 3; 2173 2174 return index; 2175} 2176 2177static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2178 struct ext4_extent *ex, 2179 ext4_lblk_t from, ext4_lblk_t to) 2180{ 2181 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2182 int flags = EXT4_FREE_BLOCKS_FORGET; 2183 2184 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2185 flags |= EXT4_FREE_BLOCKS_METADATA; 2186#ifdef EXTENTS_STATS 2187 { 2188 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2189 spin_lock(&sbi->s_ext_stats_lock); 2190 sbi->s_ext_blocks += ee_len; 2191 sbi->s_ext_extents++; 2192 if (ee_len < sbi->s_ext_min) 2193 sbi->s_ext_min = ee_len; 2194 if (ee_len > sbi->s_ext_max) 2195 sbi->s_ext_max = ee_len; 2196 if (ext_depth(inode) > sbi->s_depth_max) 2197 sbi->s_depth_max = ext_depth(inode); 2198 spin_unlock(&sbi->s_ext_stats_lock); 2199 } 2200#endif 2201 if (from >= le32_to_cpu(ex->ee_block) 2202 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2203 /* tail removal */ 2204 ext4_lblk_t num; 2205 ext4_fsblk_t start; 2206 2207 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2208 start = ext4_ext_pblock(ex) + ee_len - num; 2209 ext_debug("free last %u blocks starting %llu\n", num, start); 2210 ext4_free_blocks(handle, inode, NULL, start, num, flags); 2211 } else if (from == le32_to_cpu(ex->ee_block) 2212 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2213 /* head removal */ 2214 ext4_lblk_t num; 2215 ext4_fsblk_t start; 2216 2217 num = to - from; 2218 start = ext4_ext_pblock(ex); 2219 2220 ext_debug("free first %u blocks starting %llu\n", num, start); 2221 ext4_free_blocks(handle, inode, 0, start, num, flags); 2222 2223 } else { 2224 printk(KERN_INFO "strange request: removal(2) " 2225 "%u-%u from %u:%u\n", 2226 from, to, le32_to_cpu(ex->ee_block), ee_len); 2227 } 2228 return 0; 2229} 2230 2231 2232/* 2233 * ext4_ext_rm_leaf() Removes the extents associated with the 2234 * blocks appearing between "start" and "end", and splits the extents 2235 * if "start" and "end" appear in the same extent 2236 * 2237 * @handle: The journal handle 2238 * @inode: The files inode 2239 * @path: The path to the leaf 2240 * @start: The first block to remove 2241 * @end: The last block to remove 2242 */ 2243static int 2244ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2245 struct ext4_ext_path *path, ext4_lblk_t start, 2246 ext4_lblk_t end) 2247{ 2248 int err = 0, correct_index = 0; 2249 int depth = ext_depth(inode), credits; 2250 struct ext4_extent_header *eh; 2251 ext4_lblk_t a, b, block; 2252 unsigned num; 2253 ext4_lblk_t ex_ee_block; 2254 unsigned short ex_ee_len; 2255 unsigned uninitialized = 0; 2256 struct ext4_extent *ex; 2257 struct ext4_map_blocks map; 2258 2259 /* the header must be checked already in ext4_ext_remove_space() */ 2260 ext_debug("truncate since %u in leaf\n", start); 2261 if (!path[depth].p_hdr) 2262 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2263 eh = path[depth].p_hdr; 2264 if (unlikely(path[depth].p_hdr == NULL)) { 2265 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2266 return -EIO; 2267 } 2268 /* find where to start removing */ 2269 ex = EXT_LAST_EXTENT(eh); 2270 2271 ex_ee_block = le32_to_cpu(ex->ee_block); 2272 ex_ee_len = ext4_ext_get_actual_len(ex); 2273 2274 while (ex >= EXT_FIRST_EXTENT(eh) && 2275 ex_ee_block + ex_ee_len > start) { 
2276 2277 if (ext4_ext_is_uninitialized(ex)) 2278 uninitialized = 1; 2279 else 2280 uninitialized = 0; 2281 2282 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2283 uninitialized, ex_ee_len); 2284 path[depth].p_ext = ex; 2285 2286 a = ex_ee_block > start ? ex_ee_block : start; 2287 b = ex_ee_block+ex_ee_len - 1 < end ? 2288 ex_ee_block+ex_ee_len - 1 : end; 2289 2290 ext_debug(" border %u:%u\n", a, b); 2291 2292 /* If this extent is beyond the end of the hole, skip it */ 2293 if (end <= ex_ee_block) { 2294 ex--; 2295 ex_ee_block = le32_to_cpu(ex->ee_block); 2296 ex_ee_len = ext4_ext_get_actual_len(ex); 2297 continue; 2298 } else if (a != ex_ee_block && 2299 b != ex_ee_block + ex_ee_len - 1) { 2300 /* 2301 * If this is a truncate, then this condition should 2302 * never happen because at least one of the end points 2303 * needs to be on the edge of the extent. 2304 */ 2305 if (end == EXT_MAX_BLOCK) { 2306 ext_debug(" bad truncate %u:%u\n", 2307 start, end); 2308 block = 0; 2309 num = 0; 2310 err = -EIO; 2311 goto out; 2312 } 2313 /* 2314 * else this is a hole punch, so the extent needs to 2315 * be split since neither edge of the hole is on the 2316 * extent edge 2317 */ 2318 else{ 2319 map.m_pblk = ext4_ext_pblock(ex); 2320 map.m_lblk = ex_ee_block; 2321 map.m_len = b - ex_ee_block; 2322 2323 err = ext4_split_extent(handle, 2324 inode, path, &map, 0, 2325 EXT4_GET_BLOCKS_PUNCH_OUT_EXT | 2326 EXT4_GET_BLOCKS_PRE_IO); 2327 2328 if (err < 0) 2329 goto out; 2330 2331 ex_ee_len = ext4_ext_get_actual_len(ex); 2332 2333 b = ex_ee_block+ex_ee_len - 1 < end ? 2334 ex_ee_block+ex_ee_len - 1 : end; 2335 2336 /* Then remove tail of this extent */ 2337 block = ex_ee_block; 2338 num = a - block; 2339 } 2340 } else if (a != ex_ee_block) { 2341 /* remove tail of the extent */ 2342 block = ex_ee_block; 2343 num = a - block; 2344 } else if (b != ex_ee_block + ex_ee_len - 1) { 2345 /* remove head of the extent */ 2346 block = b; 2347 num = ex_ee_block + ex_ee_len - b; 2348 2349 /* 2350 * If this is a truncate, this condition 2351 * should never happen 2352 */ 2353 if (end == EXT_MAX_BLOCK) { 2354 ext_debug(" bad truncate %u:%u\n", 2355 start, end); 2356 err = -EIO; 2357 goto out; 2358 } 2359 } else { 2360 /* remove whole extent: excellent! 
*/ 2361 block = ex_ee_block; 2362 num = 0; 2363 if (a != ex_ee_block) { 2364 ext_debug(" bad truncate %u:%u\n", 2365 start, end); 2366 err = -EIO; 2367 goto out; 2368 } 2369 2370 if (b != ex_ee_block + ex_ee_len - 1) { 2371 ext_debug(" bad truncate %u:%u\n", 2372 start, end); 2373 err = -EIO; 2374 goto out; 2375 } 2376 } 2377 2378 /* 2379 * 3 for leaf, sb, and inode plus 2 (bmap and group 2380 * descriptor) for each block group; assume two block 2381 * groups plus ex_ee_len/blocks_per_block_group for 2382 * the worst case 2383 */ 2384 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2385 if (ex == EXT_FIRST_EXTENT(eh)) { 2386 correct_index = 1; 2387 credits += (ext_depth(inode)) + 1; 2388 } 2389 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2390 2391 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 2392 if (err) 2393 goto out; 2394 2395 err = ext4_ext_get_access(handle, inode, path + depth); 2396 if (err) 2397 goto out; 2398 2399 err = ext4_remove_blocks(handle, inode, ex, a, b); 2400 if (err) 2401 goto out; 2402 2403 if (num == 0) { 2404 /* this extent is removed; mark slot entirely unused */ 2405 ext4_ext_store_pblock(ex, 0); 2406 } else if (block != ex_ee_block) { 2407 /* 2408 * If this was a head removal, then we need to update 2409 * the physical block since it is now at a different 2410 * location 2411 */ 2412 ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a)); 2413 } 2414 2415 ex->ee_block = cpu_to_le32(block); 2416 ex->ee_len = cpu_to_le16(num); 2417 /* 2418 * Do not mark uninitialized if all the blocks in the 2419 * extent have been removed. 2420 */ 2421 if (uninitialized && num) 2422 ext4_ext_mark_uninitialized(ex); 2423 2424 err = ext4_ext_dirty(handle, inode, path + depth); 2425 if (err) 2426 goto out; 2427 2428 /* 2429 * If the extent was completely released, 2430 * we need to remove it from the leaf 2431 */ 2432 if (num == 0) { 2433 if (end != EXT_MAX_BLOCK) { 2434 /* 2435 * For hole punching, we need to scoot all the 2436 * extents up when an extent is removed so that 2437 * we dont have blank extents in the middle 2438 */ 2439 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2440 sizeof(struct ext4_extent)); 2441 2442 /* Now get rid of the one at the end */ 2443 memset(EXT_LAST_EXTENT(eh), 0, 2444 sizeof(struct ext4_extent)); 2445 } 2446 le16_add_cpu(&eh->eh_entries, -1); 2447 } 2448 2449 ext_debug("new extent: %u:%u:%llu\n", block, num, 2450 ext4_ext_pblock(ex)); 2451 ex--; 2452 ex_ee_block = le32_to_cpu(ex->ee_block); 2453 ex_ee_len = ext4_ext_get_actual_len(ex); 2454 } 2455 2456 if (correct_index && eh->eh_entries) 2457 err = ext4_ext_correct_indexes(handle, inode, path); 2458 2459 /* if this leaf is free, then we should 2460 * remove it from index block above */ 2461 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2462 err = ext4_ext_rm_idx(handle, inode, path + depth); 2463 2464out: 2465 return err; 2466} 2467 2468/* 2469 * ext4_ext_more_to_rm: 2470 * returns 1 if current index has to be freed (even partial) 2471 */ 2472static int 2473ext4_ext_more_to_rm(struct ext4_ext_path *path) 2474{ 2475 BUG_ON(path->p_idx == NULL); 2476 2477 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2478 return 0; 2479 2480 /* 2481 * if truncate on deeper level happened, it wasn't partial, 2482 * so we have to consider current index for truncation 2483 */ 2484 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2485 return 0; 2486 return 1; 2487} 2488 2489static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 
2490 ext4_lblk_t end) 2491{ 2492 struct super_block *sb = inode->i_sb; 2493 int depth = ext_depth(inode); 2494 struct ext4_ext_path *path; 2495 handle_t *handle; 2496 int i, err; 2497 2498 ext_debug("truncate since %u\n", start); 2499 2500 /* probably first extent we're gonna free will be last in block */ 2501 handle = ext4_journal_start(inode, depth + 1); 2502 if (IS_ERR(handle)) 2503 return PTR_ERR(handle); 2504 2505again: 2506 ext4_ext_invalidate_cache(inode); 2507 2508 /* 2509 * We start scanning from right side, freeing all the blocks 2510 * after i_size and walking into the tree depth-wise. 2511 */ 2512 depth = ext_depth(inode); 2513 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); 2514 if (path == NULL) { 2515 ext4_journal_stop(handle); 2516 return -ENOMEM; 2517 } 2518 path[0].p_depth = depth; 2519 path[0].p_hdr = ext_inode_hdr(inode); 2520 if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2521 err = -EIO; 2522 goto out; 2523 } 2524 i = err = 0; 2525 2526 while (i >= 0 && err == 0) { 2527 if (i == depth) { 2528 /* this is leaf block */ 2529 err = ext4_ext_rm_leaf(handle, inode, path, 2530 start, end); 2531 /* root level has p_bh == NULL, brelse() eats this */ 2532 brelse(path[i].p_bh); 2533 path[i].p_bh = NULL; 2534 i--; 2535 continue; 2536 } 2537 2538 /* this is index block */ 2539 if (!path[i].p_hdr) { 2540 ext_debug("initialize header\n"); 2541 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2542 } 2543 2544 if (!path[i].p_idx) { 2545 /* this level hasn't been touched yet */ 2546 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2547 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2548 ext_debug("init index ptr: hdr 0x%p, num %d\n", 2549 path[i].p_hdr, 2550 le16_to_cpu(path[i].p_hdr->eh_entries)); 2551 } else { 2552 /* we were already here, see at next index */ 2553 path[i].p_idx--; 2554 } 2555 2556 ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2557 i, EXT_FIRST_INDEX(path[i].p_hdr), 2558 path[i].p_idx); 2559 if (ext4_ext_more_to_rm(path + i)) { 2560 struct buffer_head *bh; 2561 /* go to the next level */ 2562 ext_debug("move to level %d (block %llu)\n", 2563 i + 1, ext4_idx_pblock(path[i].p_idx)); 2564 memset(path + i + 1, 0, sizeof(*path)); 2565 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2566 if (!bh) { 2567 /* should we reset i_size? 
*/ 2568 err = -EIO; 2569 break; 2570 } 2571 if (WARN_ON(i + 1 > depth)) { 2572 err = -EIO; 2573 break; 2574 } 2575 if (ext4_ext_check(inode, ext_block_hdr(bh), 2576 depth - i - 1)) { 2577 err = -EIO; 2578 break; 2579 } 2580 path[i + 1].p_bh = bh; 2581 2582 /* save actual number of indexes since this 2583 * number is changed at the next iteration */ 2584 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2585 i++; 2586 } else { 2587 /* we finished processing this index, go up */ 2588 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2589 /* index is empty, remove it; 2590 * handle must be already prepared by the 2591 * truncatei_leaf() */ 2592 err = ext4_ext_rm_idx(handle, inode, path + i); 2593 } 2594 /* root level has p_bh == NULL, brelse() eats this */ 2595 brelse(path[i].p_bh); 2596 path[i].p_bh = NULL; 2597 i--; 2598 ext_debug("return to level %d\n", i); 2599 } 2600 } 2601 2602 /* TODO: flexible tree reduction should be here */ 2603 if (path->p_hdr->eh_entries == 0) { 2604 /* 2605 * truncate to zero freed all the tree, 2606 * so we need to correct eh_depth 2607 */ 2608 err = ext4_ext_get_access(handle, inode, path); 2609 if (err == 0) { 2610 ext_inode_hdr(inode)->eh_depth = 0; 2611 ext_inode_hdr(inode)->eh_max = 2612 cpu_to_le16(ext4_ext_space_root(inode, 0)); 2613 err = ext4_ext_dirty(handle, inode, path); 2614 } 2615 } 2616out: 2617 ext4_ext_drop_refs(path); 2618 kfree(path); 2619 if (err == -EAGAIN) 2620 goto again; 2621 ext4_journal_stop(handle); 2622 2623 return err; 2624} 2625 2626/* 2627 * called at mount time 2628 */ 2629void ext4_ext_init(struct super_block *sb) 2630{ 2631 /* 2632 * possible initialization would be here 2633 */ 2634 2635 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 2636#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 2637 printk(KERN_INFO "EXT4-fs: file extents enabled"); 2638#ifdef AGGRESSIVE_TEST 2639 printk(", aggressive tests"); 2640#endif 2641#ifdef CHECK_BINSEARCH 2642 printk(", check binsearch"); 2643#endif 2644#ifdef EXTENTS_STATS 2645 printk(", stats"); 2646#endif 2647 printk("\n"); 2648#endif 2649#ifdef EXTENTS_STATS 2650 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2651 EXT4_SB(sb)->s_ext_min = 1 << 30; 2652 EXT4_SB(sb)->s_ext_max = 0; 2653#endif 2654 } 2655} 2656 2657/* 2658 * called at umount time 2659 */ 2660void ext4_ext_release(struct super_block *sb) 2661{ 2662 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2663 return; 2664 2665#ifdef EXTENTS_STATS 2666 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2667 struct ext4_sb_info *sbi = EXT4_SB(sb); 2668 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2669 sbi->s_ext_blocks, sbi->s_ext_extents, 2670 sbi->s_ext_blocks / sbi->s_ext_extents); 2671 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2672 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2673 } 2674#endif 2675} 2676 2677/* FIXME!! we need to try to merge to left or right after zero-out */ 2678static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2679{ 2680 ext4_fsblk_t ee_pblock; 2681 unsigned int ee_len; 2682 int ret; 2683 2684 ee_len = ext4_ext_get_actual_len(ex); 2685 ee_pblock = ext4_ext_pblock(ex); 2686 2687 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 2688 if (ret > 0) 2689 ret = 0; 2690 2691 return ret; 2692} 2693 2694/* 2695 * used by extent splitting. 
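 * The split_flag bits defined below control that splitting: whether
 * ext4_split_extent_at() may fall back to zeroing out the original
 * extent if the split fails with ENOSPC, and which half (if any) of
 * the result is marked uninitialized.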
2696 */
2697#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
2698					due to ENOSPC */
2699#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
2700#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
2701
2702/*
2703 * ext4_split_extent_at() splits an extent at the given block.
2704 *
2705 * @handle: the journal handle
2706 * @inode: the file inode
2707 * @path: the path to the extent
2708 * @split: the logical block where the extent is split.
2709 * @split_flags: indicates whether the extent may be zeroed out if the split
2710 *		 fails, and the states (init or uninit) of the new extents.
2711 * @flags: flags used to insert the new extent into the extent tree.
2712 *
2713 *
2714 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
2715 * states are determined by split_flag.
2716 *
2717 * There are two cases:
2718 *  a> the extent is split into two extents.
2719 *  b> no split is needed; the extent is just marked.
2720 *
2721 * return 0 on success.
2722 */
2723static int ext4_split_extent_at(handle_t *handle,
2724			     struct inode *inode,
2725			     struct ext4_ext_path *path,
2726			     ext4_lblk_t split,
2727			     int split_flag,
2728			     int flags)
2729{
2730	ext4_fsblk_t newblock;
2731	ext4_lblk_t ee_block;
2732	struct ext4_extent *ex, newex, orig_ex;
2733	struct ext4_extent *ex2 = NULL;
2734	unsigned int ee_len, depth;
2735	int err = 0;
2736
2737	ext_debug("ext4_split_extent_at: inode %lu, logical"
2738		"block %llu\n", inode->i_ino, (unsigned long long)split);
2739
2740	ext4_ext_show_leaf(inode, path);
2741
2742	depth = ext_depth(inode);
2743	ex = path[depth].p_ext;
2744	ee_block = le32_to_cpu(ex->ee_block);
2745	ee_len = ext4_ext_get_actual_len(ex);
2746	newblock = split - ee_block + ext4_ext_pblock(ex);
2747
2748	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2749
2750	err = ext4_ext_get_access(handle, inode, path + depth);
2751	if (err)
2752		goto out;
2753
2754	if (split == ee_block) {
2755		/*
2756		 * case b: block @split is the first block of the extent,
2757		 * so we just change the state of the extent; splitting
2758		 * is not needed.
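		 * For example (illustrative block numbers): splitting
		 * [100, 200) at block 100 leaves the single extent
		 * [100, 200) in place and merely flips its state below.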
2759 */ 2760 if (split_flag & EXT4_EXT_MARK_UNINIT2) 2761 ext4_ext_mark_uninitialized(ex); 2762 else 2763 ext4_ext_mark_initialized(ex); 2764 2765 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 2766 ext4_ext_try_to_merge(inode, path, ex); 2767 2768 err = ext4_ext_dirty(handle, inode, path + depth); 2769 goto out; 2770 } 2771 2772 /* case a */ 2773 memcpy(&orig_ex, ex, sizeof(orig_ex)); 2774 ex->ee_len = cpu_to_le16(split - ee_block); 2775 if (split_flag & EXT4_EXT_MARK_UNINIT1) 2776 ext4_ext_mark_uninitialized(ex); 2777 2778 /* 2779 * path may lead to new leaf, not to original leaf any more 2780 * after ext4_ext_insert_extent() returns, 2781 */ 2782 err = ext4_ext_dirty(handle, inode, path + depth); 2783 if (err) 2784 goto fix_extent_len; 2785 2786 ex2 = &newex; 2787 ex2->ee_block = cpu_to_le32(split); 2788 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 2789 ext4_ext_store_pblock(ex2, newblock); 2790 if (split_flag & EXT4_EXT_MARK_UNINIT2) 2791 ext4_ext_mark_uninitialized(ex2); 2792 2793 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 2794 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 2795 err = ext4_ext_zeroout(inode, &orig_ex); 2796 if (err) 2797 goto fix_extent_len; 2798 /* update the extent length and mark as initialized */ 2799 ex->ee_len = cpu_to_le32(ee_len); 2800 ext4_ext_try_to_merge(inode, path, ex); 2801 err = ext4_ext_dirty(handle, inode, path + depth); 2802 goto out; 2803 } else if (err) 2804 goto fix_extent_len; 2805 2806out: 2807 ext4_ext_show_leaf(inode, path); 2808 return err; 2809 2810fix_extent_len: 2811 ex->ee_len = orig_ex.ee_len; 2812 ext4_ext_dirty(handle, inode, path + depth); 2813 return err; 2814} 2815 2816/* 2817 * ext4_split_extents() splits an extent and mark extent which is covered 2818 * by @map as split_flags indicates 2819 * 2820 * It may result in splitting the extent into multiple extents (upto three) 2821 * There are three possibilities: 2822 * a> There is no split required 2823 * b> Splits in two extents: Split is happening at either end of the extent 2824 * c> Splits in three extents: Somone is splitting in middle of the extent 2825 * 2826 */ 2827static int ext4_split_extent(handle_t *handle, 2828 struct inode *inode, 2829 struct ext4_ext_path *path, 2830 struct ext4_map_blocks *map, 2831 int split_flag, 2832 int flags) 2833{ 2834 ext4_lblk_t ee_block; 2835 struct ext4_extent *ex; 2836 unsigned int ee_len, depth; 2837 int err = 0; 2838 int uninitialized; 2839 int split_flag1, flags1; 2840 2841 depth = ext_depth(inode); 2842 ex = path[depth].p_ext; 2843 ee_block = le32_to_cpu(ex->ee_block); 2844 ee_len = ext4_ext_get_actual_len(ex); 2845 uninitialized = ext4_ext_is_uninitialized(ex); 2846 2847 if (map->m_lblk + map->m_len < ee_block + ee_len) { 2848 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 2849 EXT4_EXT_MAY_ZEROOUT : 0; 2850 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 2851 if (uninitialized) 2852 split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 2853 EXT4_EXT_MARK_UNINIT2; 2854 err = ext4_split_extent_at(handle, inode, path, 2855 map->m_lblk + map->m_len, split_flag1, flags1); 2856 if (err) 2857 goto out; 2858 } 2859 2860 ext4_ext_drop_refs(path); 2861 path = ext4_ext_find_extent(inode, map->m_lblk, path); 2862 if (IS_ERR(path)) 2863 return PTR_ERR(path); 2864 2865 if (map->m_lblk >= ee_block) { 2866 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 
				 EXT4_EXT_MAY_ZEROOUT : 0;
2868		if (uninitialized)
2869			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
2870		if (split_flag & EXT4_EXT_MARK_UNINIT2)
2871			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
2872		err = ext4_split_extent_at(handle, inode, path,
2873				map->m_lblk, split_flag1, flags);
2874		if (err)
2875			goto out;
2876	}
2877
2878	ext4_ext_show_leaf(inode, path);
2879out:
2880	return err ? err : map->m_len;
2881}
2882
2883#define EXT4_EXT_ZERO_LEN 7
2884/*
2885 * This function is called by ext4_ext_map_blocks() if someone tries to write
2886 * to an uninitialized extent. It may result in splitting the uninitialized
2887 * extent into multiple extents (up to three - one initialized and two
2888 * uninitialized).
2889 * There are three possibilities:
2890 *   a> There is no split required: Entire extent should be initialized
2891 *   b> Splits in two extents: Write is happening at either end of the extent
2892 *   c> Splits in three extents: Someone is writing in the middle of the extent
2893 */
2894static int ext4_ext_convert_to_initialized(handle_t *handle,
2895					   struct inode *inode,
2896					   struct ext4_map_blocks *map,
2897					   struct ext4_ext_path *path)
2898{
2899	struct ext4_map_blocks split_map;
2900	struct ext4_extent zero_ex;
2901	struct ext4_extent *ex;
2902	ext4_lblk_t ee_block, eof_block;
2903	unsigned int allocated, ee_len, depth;
2904	int err = 0;
2905	int split_flag = 0;
2906
2907	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2908		"block %llu, max_blocks %u\n", inode->i_ino,
2909		(unsigned long long)map->m_lblk, map->m_len);
2910
2911	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2912		inode->i_sb->s_blocksize_bits;
2913	if (eof_block < map->m_lblk + map->m_len)
2914		eof_block = map->m_lblk + map->m_len;
2915
2916	depth = ext_depth(inode);
2917	ex = path[depth].p_ext;
2918	ee_block = le32_to_cpu(ex->ee_block);
2919	ee_len = ext4_ext_get_actual_len(ex);
2920	allocated = ee_len - (map->m_lblk - ee_block);
2921
2922	WARN_ON(map->m_lblk < ee_block);
2923	/*
2924	 * It is safe to convert the extent to initialized via explicit
2925	 * zeroout only if the extent is fully inside i_size or new_size.
2926	 */
2927	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2928
2929	/* If the extent is shorter than 2*EXT4_EXT_ZERO_LEN, zero it out directly */
2930	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
2931	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2932		err = ext4_ext_zeroout(inode, ex);
2933		if (err)
2934			goto out;
2935
2936		err = ext4_ext_get_access(handle, inode, path + depth);
2937		if (err)
2938			goto out;
2939		ext4_ext_mark_initialized(ex);
2940		ext4_ext_try_to_merge(inode, path, ex);
2941		err = ext4_ext_dirty(handle, inode, path + depth);
2942		goto out;
2943	}
2944
2945	/*
2946	 * four cases:
2947	 * 1. split the extent into three extents.
2948	 * 2. split the extent into two extents, zeroout the first half.
2949	 * 3. split the extent into two extents, zeroout the second half.
2950	 * 4. split the extent into two extents without zeroout.
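	 *
	 * As an illustrative reading of the checks below: a write into the
	 * middle of a long uninitialized extent gives case 1,
	 * uninit-init-uninit; cases 2 and 3 apply when the written region
	 * sits within EXT4_EXT_ZERO_LEN blocks of the extent's start or
	 * end, where zeroing that short stretch is cheaper than carrying
	 * a third extent.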
2951	 */
2952	split_map.m_lblk = map->m_lblk;
2953	split_map.m_len = map->m_len;
2954
2955	if (allocated > map->m_len) {
2956		if (allocated <= EXT4_EXT_ZERO_LEN &&
2957		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2958			/* case 3 */
2959			zero_ex.ee_block =
2960					 cpu_to_le32(map->m_lblk);
2961			zero_ex.ee_len = cpu_to_le16(allocated);
2962			ext4_ext_store_pblock(&zero_ex,
2963				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
2964			err = ext4_ext_zeroout(inode, &zero_ex);
2965			if (err)
2966				goto out;
2967			split_map.m_lblk = map->m_lblk;
2968			split_map.m_len = allocated;
2969		} else if ((map->m_lblk - ee_block + map->m_len <
2970			   EXT4_EXT_ZERO_LEN) &&
2971			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2972			/* case 2 */
2973			if (map->m_lblk != ee_block) {
2974				zero_ex.ee_block = ex->ee_block;
2975				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
2976							ee_block);
2977				ext4_ext_store_pblock(&zero_ex,
2978						      ext4_ext_pblock(ex));
2979				err = ext4_ext_zeroout(inode, &zero_ex);
2980				if (err)
2981					goto out;
2982			}
2983
2984			split_map.m_lblk = ee_block;
2985			split_map.m_len = map->m_lblk - ee_block + map->m_len;
2986			allocated = map->m_len;
2987		}
2988	}
2989
2990	allocated = ext4_split_extent(handle, inode, path,
2991				       &split_map, split_flag, 0);
2992	if (allocated < 0)
2993		err = allocated;
2994
2995out:
2996	return err ? err : allocated;
2997}
2998
2999/*
3000 * This function is called by ext4_ext_map_blocks() from
3001 * ext4_get_blocks_dio_write() when DIO writes
3002 * to an uninitialized extent.
3003 *
3004 * Writing to an uninitialized extent may result in splitting the uninitialized
3005 * extent into multiple initialized/uninitialized extents (up to three).
3006 * There are three possibilities:
3007 *   a> There is no split required: Entire extent should be uninitialized
3008 *   b> Splits in two extents: Write is happening at either end of the extent
3009 *   c> Splits in three extents: Someone is writing in the middle of the extent
3010 *
3011 * One or more index blocks may be needed if the extent tree grows after
3012 * the uninitialized extent is split. To prevent ENOSPC from occurring at
3013 * IO completion, we split the uninitialized extent before DIO submits
3014 * the IO. The uninitialized extent handled here is split into at most
3015 * three uninitialized extents. After IO completes, the part that was
3016 * filled will be converted to initialized by the end_io callback function
3017 * via ext4_convert_unwritten_extents().
3018 *
3019 * Returns the size of the uninitialized extent to be written on success.
3020 */
3021static int ext4_split_unwritten_extents(handle_t *handle,
3022					struct inode *inode,
3023					struct ext4_map_blocks *map,
3024					struct ext4_ext_path *path,
3025					int flags)
3026{
3027	ext4_lblk_t eof_block;
3028	ext4_lblk_t ee_block;
3029	struct ext4_extent *ex;
3030	unsigned int ee_len;
3031	int split_flag = 0, depth;
3032
3033	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
3034		"block %llu, max_blocks %u\n", inode->i_ino,
3035		(unsigned long long)map->m_lblk, map->m_len);
3036
3037	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3038		inode->i_sb->s_blocksize_bits;
3039	if (eof_block < map->m_lblk + map->m_len)
3040		eof_block = map->m_lblk + map->m_len;
3041	/*
3042	 * It is safe to convert the extent to initialized via explicit
3043	 * zeroout only if the extent is fully inside i_size or new_size.
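	 * For example (illustrative numbers): with eof_block at 100, an
	 * unwritten extent [90, 110) crosses that boundary, so
	 * EXT4_EXT_MAY_ZEROOUT stays clear and a failed split returns
	 * ENOSPC rather than zeroing blocks beyond the known EOF.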
3044 */ 3045 depth = ext_depth(inode); 3046 ex = path[depth].p_ext; 3047 ee_block = le32_to_cpu(ex->ee_block); 3048 ee_len = ext4_ext_get_actual_len(ex); 3049 3050 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3051 split_flag |= EXT4_EXT_MARK_UNINIT2; 3052 3053 flags |= EXT4_GET_BLOCKS_PRE_IO; 3054 return ext4_split_extent(handle, inode, path, map, split_flag, flags); 3055} 3056 3057static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3058 struct inode *inode, 3059 struct ext4_ext_path *path) 3060{ 3061 struct ext4_extent *ex; 3062 struct ext4_extent_header *eh; 3063 int depth; 3064 int err = 0; 3065 3066 depth = ext_depth(inode); 3067 eh = path[depth].p_hdr; 3068 ex = path[depth].p_ext; 3069 3070 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3071 "block %llu, max_blocks %u\n", inode->i_ino, 3072 (unsigned long long)le32_to_cpu(ex->ee_block), 3073 ext4_ext_get_actual_len(ex)); 3074 3075 err = ext4_ext_get_access(handle, inode, path + depth); 3076 if (err) 3077 goto out; 3078 /* first mark the extent as initialized */ 3079 ext4_ext_mark_initialized(ex); 3080 3081 /* note: ext4_ext_correct_indexes() isn't needed here because 3082 * borders are not changed 3083 */ 3084 ext4_ext_try_to_merge(inode, path, ex); 3085 3086 /* Mark modified extent as dirty */ 3087 err = ext4_ext_dirty(handle, inode, path + depth); 3088out: 3089 ext4_ext_show_leaf(inode, path); 3090 return err; 3091} 3092 3093static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3094 sector_t block, int count) 3095{ 3096 int i; 3097 for (i = 0; i < count; i++) 3098 unmap_underlying_metadata(bdev, block + i); 3099} 3100 3101/* 3102 * Handle EOFBLOCKS_FL flag, clearing it if necessary 3103 */ 3104static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3105 ext4_lblk_t lblk, 3106 struct ext4_ext_path *path, 3107 unsigned int len) 3108{ 3109 int i, depth; 3110 struct ext4_extent_header *eh; 3111 struct ext4_extent *last_ex; 3112 3113 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3114 return 0; 3115 3116 depth = ext_depth(inode); 3117 eh = path[depth].p_hdr; 3118 3119 if (unlikely(!eh->eh_entries)) { 3120 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " 3121 "EOFBLOCKS_FL set"); 3122 return -EIO; 3123 } 3124 last_ex = EXT_LAST_EXTENT(eh); 3125 /* 3126 * We should clear the EOFBLOCKS_FL flag if we are writing the 3127 * last block in the last extent in the file. We test this by 3128 * first checking to see if the caller to 3129 * ext4_ext_get_blocks() was interested in the last block (or 3130 * a block beyond the last block) in the current extent. If 3131 * this turns out to be false, we can bail out from this 3132 * function immediately. 3133 */ 3134 if (lblk + len < le32_to_cpu(last_ex->ee_block) + 3135 ext4_ext_get_actual_len(last_ex)) 3136 return 0; 3137 /* 3138 * If the caller does appear to be planning to write at or 3139 * beyond the end of the current extent, we then test to see 3140 * if the current extent is the last extent in the file, by 3141 * checking to make sure it was reached via the rightmost node 3142 * at each level of the tree. 
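	 * (In other words: if any index on the path is not the last entry
	 * in its index block, some extent lies to our right, EOFBLOCKS_FL
	 * must stay set, and the loop below returns early.)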
3143 */ 3144 for (i = depth-1; i >= 0; i--) 3145 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 3146 return 0; 3147 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3148 return ext4_mark_inode_dirty(handle, inode); 3149} 3150 3151static int 3152ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3153 struct ext4_map_blocks *map, 3154 struct ext4_ext_path *path, int flags, 3155 unsigned int allocated, ext4_fsblk_t newblock) 3156{ 3157 int ret = 0; 3158 int err = 0; 3159 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3160 3161 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" 3162 "block %llu, max_blocks %u, flags %d, allocated %u", 3163 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 3164 flags, allocated); 3165 ext4_ext_show_leaf(inode, path); 3166 3167 /* get_block() before submit the IO, split the extent */ 3168 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3169 ret = ext4_split_unwritten_extents(handle, inode, map, 3170 path, flags); 3171 /* 3172 * Flag the inode(non aio case) or end_io struct (aio case) 3173 * that this IO needs to conversion to written when IO is 3174 * completed 3175 */ 3176 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { 3177 io->flag = EXT4_IO_END_UNWRITTEN; 3178 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 3179 } else 3180 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3181 if (ext4_should_dioread_nolock(inode)) 3182 map->m_flags |= EXT4_MAP_UNINIT; 3183 goto out; 3184 } 3185 /* IO end_io complete, convert the filled extent to written */ 3186 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3187 ret = ext4_convert_unwritten_extents_endio(handle, inode, 3188 path); 3189 if (ret >= 0) { 3190 ext4_update_inode_fsync_trans(handle, inode, 1); 3191 err = check_eofblocks_fl(handle, inode, map->m_lblk, 3192 path, map->m_len); 3193 } else 3194 err = ret; 3195 goto out2; 3196 } 3197 /* buffered IO case */ 3198 /* 3199 * repeat fallocate creation request 3200 * we already have an unwritten extent 3201 */ 3202 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) 3203 goto map_out; 3204 3205 /* buffered READ or buffered write_begin() lookup */ 3206 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3207 /* 3208 * We have blocks reserved already. We 3209 * return allocated blocks so that delalloc 3210 * won't do block reservation for us. But 3211 * the buffer head will be unmapped so that 3212 * a read from the block returns 0s. 3213 */ 3214 map->m_flags |= EXT4_MAP_UNWRITTEN; 3215 goto out1; 3216 } 3217 3218 /* buffered write, writepage time, convert*/ 3219 ret = ext4_ext_convert_to_initialized(handle, inode, map, path); 3220 if (ret >= 0) { 3221 ext4_update_inode_fsync_trans(handle, inode, 1); 3222 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 3223 map->m_len); 3224 if (err < 0) 3225 goto out2; 3226 } 3227 3228out: 3229 if (ret <= 0) { 3230 err = ret; 3231 goto out2; 3232 } else 3233 allocated = ret; 3234 map->m_flags |= EXT4_MAP_NEW; 3235 /* 3236 * if we allocated more blocks than requested 3237 * we need to make sure we unmap the extra block 3238 * allocated. The actual needed block will get 3239 * unmapped later when we find the buffer_head marked 3240 * new. 
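	 * For example (illustrative): if 8 blocks were asked for but the
	 * conversion produced 12, the 4 blocks past newblock + map->m_len
	 * are unmapped just below so no stale buffer_head can alias them.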
3241 */ 3242 if (allocated > map->m_len) { 3243 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3244 newblock + map->m_len, 3245 allocated - map->m_len); 3246 allocated = map->m_len; 3247 } 3248 3249 /* 3250 * If we have done fallocate with the offset that is already 3251 * delayed allocated, we would have block reservation 3252 * and quota reservation done in the delayed write path. 3253 * But fallocate would have already updated quota and block 3254 * count for this offset. So cancel these reservation 3255 */ 3256 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 3257 ext4_da_update_reserve_space(inode, allocated, 0); 3258 3259map_out: 3260 map->m_flags |= EXT4_MAP_MAPPED; 3261out1: 3262 if (allocated > map->m_len) 3263 allocated = map->m_len; 3264 ext4_ext_show_leaf(inode, path); 3265 map->m_pblk = newblock; 3266 map->m_len = allocated; 3267out2: 3268 if (path) { 3269 ext4_ext_drop_refs(path); 3270 kfree(path); 3271 } 3272 return err ? err : allocated; 3273} 3274 3275/* 3276 * Block allocation/map/preallocation routine for extents based files 3277 * 3278 * 3279 * Need to be called with 3280 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 3281 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 3282 * 3283 * return > 0, number of of blocks already mapped/allocated 3284 * if create == 0 and these are pre-allocated blocks 3285 * buffer head is unmapped 3286 * otherwise blocks are mapped 3287 * 3288 * return = 0, if plain look up failed (blocks have not been allocated) 3289 * buffer head is unmapped 3290 * 3291 * return < 0, error case. 3292 */ 3293int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3294 struct ext4_map_blocks *map, int flags) 3295{ 3296 struct ext4_ext_path *path = NULL; 3297 struct ext4_extent newex, *ex; 3298 ext4_fsblk_t newblock = 0; 3299 int err = 0, depth, ret; 3300 unsigned int allocated = 0; 3301 unsigned int punched_out = 0; 3302 unsigned int result = 0; 3303 struct ext4_allocation_request ar; 3304 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3305 struct ext4_map_blocks punch_map; 3306 3307 ext_debug("blocks %u/%u requested for inode %lu\n", 3308 map->m_lblk, map->m_len, inode->i_ino); 3309 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3310 3311 /* check in cache */ 3312 if (ext4_ext_in_cache(inode, map->m_lblk, &newex) && 3313 ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) { 3314 if (!newex.ee_start_lo && !newex.ee_start_hi) { 3315 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3316 /* 3317 * block isn't allocated yet and 3318 * user doesn't want to allocate it 3319 */ 3320 goto out2; 3321 } 3322 /* we should allocate requested block */ 3323 } else { 3324 /* block is already allocated */ 3325 newblock = map->m_lblk 3326 - le32_to_cpu(newex.ee_block) 3327 + ext4_ext_pblock(&newex); 3328 /* number of remaining blocks in the extent */ 3329 allocated = ext4_ext_get_actual_len(&newex) - 3330 (map->m_lblk - le32_to_cpu(newex.ee_block)); 3331 goto out; 3332 } 3333 } 3334 3335 /* find extent for this block */ 3336 path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 3337 if (IS_ERR(path)) { 3338 err = PTR_ERR(path); 3339 path = NULL; 3340 goto out2; 3341 } 3342 3343 depth = ext_depth(inode); 3344 3345 /* 3346 * consistent leaf must not be empty; 3347 * this situation is possible, though, _during_ tree modification; 3348 * this is why assert can't be put in ext4_ext_find_extent() 3349 */ 3350 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3351 EXT4_ERROR_INODE(inode, "bad 
extent address " 3352 "lblock: %lu, depth: %d pblock %lld", 3353 (unsigned long) map->m_lblk, depth, 3354 path[depth].p_block); 3355 err = -EIO; 3356 goto out2; 3357 } 3358 3359 ex = path[depth].p_ext; 3360 if (ex) { 3361 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3362 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3363 unsigned short ee_len; 3364 3365 /* 3366 * Uninitialized extents are treated as holes, except that 3367 * we split out initialized portions during a write. 3368 */ 3369 ee_len = ext4_ext_get_actual_len(ex); 3370 /* if found extent covers block, simply return it */ 3371 if (in_range(map->m_lblk, ee_block, ee_len)) { 3372 newblock = map->m_lblk - ee_block + ee_start; 3373 /* number of remaining blocks in the extent */ 3374 allocated = ee_len - (map->m_lblk - ee_block); 3375 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3376 ee_block, ee_len, newblock); 3377 3378 if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) { 3379 /* 3380 * Do not put uninitialized extent 3381 * in the cache 3382 */ 3383 if (!ext4_ext_is_uninitialized(ex)) { 3384 ext4_ext_put_in_cache(inode, ee_block, 3385 ee_len, ee_start); 3386 goto out; 3387 } 3388 ret = ext4_ext_handle_uninitialized_extents( 3389 handle, inode, map, path, flags, 3390 allocated, newblock); 3391 return ret; 3392 } 3393 3394 /* 3395 * Punch out the map length, but only to the 3396 * end of the extent 3397 */ 3398 punched_out = allocated < map->m_len ? 3399 allocated : map->m_len; 3400 3401 /* 3402 * Sense extents need to be converted to 3403 * uninitialized, they must fit in an 3404 * uninitialized extent 3405 */ 3406 if (punched_out > EXT_UNINIT_MAX_LEN) 3407 punched_out = EXT_UNINIT_MAX_LEN; 3408 3409 punch_map.m_lblk = map->m_lblk; 3410 punch_map.m_pblk = newblock; 3411 punch_map.m_len = punched_out; 3412 punch_map.m_flags = 0; 3413 3414 /* Check to see if the extent needs to be split */ 3415 if (punch_map.m_len != ee_len || 3416 punch_map.m_lblk != ee_block) { 3417 3418 ret = ext4_split_extent(handle, inode, 3419 path, &punch_map, 0, 3420 EXT4_GET_BLOCKS_PUNCH_OUT_EXT | 3421 EXT4_GET_BLOCKS_PRE_IO); 3422 3423 if (ret < 0) { 3424 err = ret; 3425 goto out2; 3426 } 3427 /* 3428 * find extent for the block at 3429 * the start of the hole 3430 */ 3431 ext4_ext_drop_refs(path); 3432 kfree(path); 3433 3434 path = ext4_ext_find_extent(inode, 3435 map->m_lblk, NULL); 3436 if (IS_ERR(path)) { 3437 err = PTR_ERR(path); 3438 path = NULL; 3439 goto out2; 3440 } 3441 3442 depth = ext_depth(inode); 3443 ex = path[depth].p_ext; 3444 ee_len = ext4_ext_get_actual_len(ex); 3445 ee_block = le32_to_cpu(ex->ee_block); 3446 ee_start = ext4_ext_pblock(ex); 3447 3448 } 3449 3450 ext4_ext_mark_uninitialized(ex); 3451 3452 err = ext4_ext_remove_space(inode, map->m_lblk, 3453 map->m_lblk + punched_out); 3454 3455 goto out2; 3456 } 3457 } 3458 3459 /* 3460 * requested block isn't allocated yet; 3461 * we couldn't try to create block if create flag is zero 3462 */ 3463 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3464 /* 3465 * put just found gap into cache to speed up 3466 * subsequent requests 3467 */ 3468 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 3469 goto out2; 3470 } 3471 /* 3472 * Okay, we need to do block allocation. 
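	 * What follows, in order: pick the closest allocated neighbours
	 * as hints, clamp the request to EXT_INIT_MAX_LEN or
	 * EXT_UNINIT_MAX_LEN, check the proposed extent for overlap,
	 * ask mballoc for blocks, and insert the new extent into the tree.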
3473 */ 3474 3475 /* find neighbour allocated blocks */ 3476 ar.lleft = map->m_lblk; 3477 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 3478 if (err) 3479 goto out2; 3480 ar.lright = map->m_lblk; 3481 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); 3482 if (err) 3483 goto out2; 3484 3485 /* 3486 * See if request is beyond maximum number of blocks we can have in 3487 * a single extent. For an initialized extent this limit is 3488 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 3489 * EXT_UNINIT_MAX_LEN. 3490 */ 3491 if (map->m_len > EXT_INIT_MAX_LEN && 3492 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3493 map->m_len = EXT_INIT_MAX_LEN; 3494 else if (map->m_len > EXT_UNINIT_MAX_LEN && 3495 (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3496 map->m_len = EXT_UNINIT_MAX_LEN; 3497 3498 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 3499 newex.ee_block = cpu_to_le32(map->m_lblk); 3500 newex.ee_len = cpu_to_le16(map->m_len); 3501 err = ext4_ext_check_overlap(inode, &newex, path); 3502 if (err) 3503 allocated = ext4_ext_get_actual_len(&newex); 3504 else 3505 allocated = map->m_len; 3506 3507 /* allocate new block */ 3508 ar.inode = inode; 3509 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 3510 ar.logical = map->m_lblk; 3511 ar.len = allocated; 3512 if (S_ISREG(inode->i_mode)) 3513 ar.flags = EXT4_MB_HINT_DATA; 3514 else 3515 /* disable in-core preallocation for non-regular files */ 3516 ar.flags = 0; 3517 newblock = ext4_mb_new_blocks(handle, &ar, &err); 3518 if (!newblock) 3519 goto out2; 3520 ext_debug("allocate new block: goal %llu, found %llu/%u\n", 3521 ar.goal, newblock, allocated); 3522 3523 /* try to insert new extent into found leaf and return */ 3524 ext4_ext_store_pblock(&newex, newblock); 3525 newex.ee_len = cpu_to_le16(ar.len); 3526 /* Mark uninitialized */ 3527 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 3528 ext4_ext_mark_uninitialized(&newex); 3529 /* 3530 * io_end structure was created for every IO write to an 3531 * uninitialized extent. To avoid unnecessary conversion, 3532 * here we flag the IO that really needs the conversion. 3533 * For non asycn direct IO case, flag the inode state 3534 * that we need to perform conversion when IO is done. 3535 */ 3536 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3537 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { 3538 io->flag = EXT4_IO_END_UNWRITTEN; 3539 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 3540 } else 3541 ext4_set_inode_state(inode, 3542 EXT4_STATE_DIO_UNWRITTEN); 3543 } 3544 if (ext4_should_dioread_nolock(inode)) 3545 map->m_flags |= EXT4_MAP_UNINIT; 3546 } 3547 3548 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); 3549 if (err) 3550 goto out2; 3551 3552 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3553 if (err) { 3554 /* free data blocks we just allocated */ 3555 /* not a good idea to call discard here directly, 3556 * but otherwise we'd need to call it every free() */ 3557 ext4_discard_preallocations(inode); 3558 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), 3559 ext4_ext_get_actual_len(&newex), 0); 3560 goto out2; 3561 } 3562 3563 /* previous routine could use block we allocated */ 3564 newblock = ext4_ext_pblock(&newex); 3565 allocated = ext4_ext_get_actual_len(&newex); 3566 if (allocated > map->m_len) 3567 allocated = map->m_len; 3568 map->m_flags |= EXT4_MAP_NEW; 3569 3570 /* 3571 * Update reserved blocks/metadata blocks after successful 3572 * block allocation which had been deferred till now. 
3573 */ 3574 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 3575 ext4_da_update_reserve_space(inode, allocated, 1); 3576 3577 /* 3578 * Cache the extent and update transaction to commit on fdatasync only 3579 * when it is _not_ an uninitialized extent. 3580 */ 3581 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { 3582 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); 3583 ext4_update_inode_fsync_trans(handle, inode, 1); 3584 } else 3585 ext4_update_inode_fsync_trans(handle, inode, 0); 3586out: 3587 if (allocated > map->m_len) 3588 allocated = map->m_len; 3589 ext4_ext_show_leaf(inode, path); 3590 map->m_flags |= EXT4_MAP_MAPPED; 3591 map->m_pblk = newblock; 3592 map->m_len = allocated; 3593out2: 3594 if (path) { 3595 ext4_ext_drop_refs(path); 3596 kfree(path); 3597 } 3598 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, 3599 newblock, map->m_len, err ? err : allocated); 3600 3601 result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ? 3602 punched_out : allocated; 3603 3604 return err ? err : result; 3605} 3606 3607void ext4_ext_truncate(struct inode *inode) 3608{ 3609 struct address_space *mapping = inode->i_mapping; 3610 struct super_block *sb = inode->i_sb; 3611 ext4_lblk_t last_block; 3612 handle_t *handle; 3613 int err = 0; 3614 3615 /* 3616 * finish any pending end_io work so we won't run the risk of 3617 * converting any truncated blocks to initialized later 3618 */ 3619 ext4_flush_completed_IO(inode); 3620 3621 /* 3622 * probably first extent we're gonna free will be last in block 3623 */ 3624 err = ext4_writepage_trans_blocks(inode); 3625 handle = ext4_journal_start(inode, err); 3626 if (IS_ERR(handle)) 3627 return; 3628 3629 if (inode->i_size & (sb->s_blocksize - 1)) 3630 ext4_block_truncate_page(handle, mapping, inode->i_size); 3631 3632 if (ext4_orphan_add(handle, inode)) 3633 goto out_stop; 3634 3635 down_write(&EXT4_I(inode)->i_data_sem); 3636 ext4_ext_invalidate_cache(inode); 3637 3638 ext4_discard_preallocations(inode); 3639 3640 /* 3641 * TODO: optimization is possible here. 3642 * Probably we need not scan at all, 3643 * because page truncation is enough. 3644 */ 3645 3646 /* we have to know where to truncate from in crash case */ 3647 EXT4_I(inode)->i_disksize = inode->i_size; 3648 ext4_mark_inode_dirty(handle, inode); 3649 3650 last_block = (inode->i_size + sb->s_blocksize - 1) 3651 >> EXT4_BLOCK_SIZE_BITS(sb); 3652 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); 3653 3654 /* In a multi-transaction truncate, we only make the final 3655 * transaction synchronous. 3656 */ 3657 if (IS_SYNC(inode)) 3658 ext4_handle_sync(handle); 3659 3660 up_write(&EXT4_I(inode)->i_data_sem); 3661 3662out_stop: 3663 /* 3664 * If this was a simple ftruncate() and the file will remain alive, 3665 * then we need to clear up the orphan record which we created above. 3666 * However, if this was a real unlink then we were called by 3667 * ext4_delete_inode(), and we allow that function to clean up the 3668 * orphan info for us. 
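	 * (A zero i_nlink means a real unlink is in flight, so the orphan
	 * entry is left in place for ext4_delete_inode() to clear.)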
3669 */ 3670 if (inode->i_nlink) 3671 ext4_orphan_del(handle, inode); 3672 3673 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3674 ext4_mark_inode_dirty(handle, inode); 3675 ext4_journal_stop(handle); 3676} 3677 3678static void ext4_falloc_update_inode(struct inode *inode, 3679 int mode, loff_t new_size, int update_ctime) 3680{ 3681 struct timespec now; 3682 3683 if (update_ctime) { 3684 now = current_fs_time(inode->i_sb); 3685 if (!timespec_equal(&inode->i_ctime, &now)) 3686 inode->i_ctime = now; 3687 } 3688 /* 3689 * Update only when preallocation was requested beyond 3690 * the file size. 3691 */ 3692 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 3693 if (new_size > i_size_read(inode)) 3694 i_size_write(inode, new_size); 3695 if (new_size > EXT4_I(inode)->i_disksize) 3696 ext4_update_i_disksize(inode, new_size); 3697 } else { 3698 /* 3699 * Mark that we allocate beyond EOF so the subsequent truncate 3700 * can proceed even if the new size is the same as i_size. 3701 */ 3702 if (new_size > i_size_read(inode)) 3703 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3704 } 3705 3706} 3707 3708/* 3709 * preallocate space for a file. This implements ext4's fallocate file 3710 * operation, which gets called from sys_fallocate system call. 3711 * For block-mapped files, posix_fallocate should fall back to the method 3712 * of writing zeroes to the required new blocks (the same behavior which is 3713 * expected for file systems which do not support fallocate() system call). 3714 */ 3715long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 3716{ 3717 struct inode *inode = file->f_path.dentry->d_inode; 3718 handle_t *handle; 3719 loff_t new_size; 3720 unsigned int max_blocks; 3721 int ret = 0; 3722 int ret2 = 0; 3723 int retries = 0; 3724 struct ext4_map_blocks map; 3725 unsigned int credits, blkbits = inode->i_blkbits; 3726 3727 /* We only support the FALLOC_FL_KEEP_SIZE mode */ 3728 if (mode & ~FALLOC_FL_KEEP_SIZE) 3729 return -EOPNOTSUPP; 3730 3731 /* 3732 * currently supporting (pre)allocate mode for extent-based 3733 * files _only_ 3734 */ 3735 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3736 return -EOPNOTSUPP; 3737 3738 trace_ext4_fallocate_enter(inode, offset, len, mode); 3739 map.m_lblk = offset >> blkbits; 3740 /* 3741 * We can't just convert len to max_blocks because 3742 * If blocksize = 4096 offset = 3072 and len = 2048 3743 */ 3744 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3745 - map.m_lblk; 3746 /* 3747 * credits to insert 1 extent into extent tree 3748 */ 3749 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3750 mutex_lock(&inode->i_mutex); 3751 ret = inode_newsize_ok(inode, (len + offset)); 3752 if (ret) { 3753 mutex_unlock(&inode->i_mutex); 3754 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 3755 return ret; 3756 } 3757retry: 3758 while (ret >= 0 && ret < max_blocks) { 3759 map.m_lblk = map.m_lblk + ret; 3760 map.m_len = max_blocks = max_blocks - ret; 3761 handle = ext4_journal_start(inode, credits); 3762 if (IS_ERR(handle)) { 3763 ret = PTR_ERR(handle); 3764 break; 3765 } 3766 ret = ext4_map_blocks(handle, inode, &map, 3767 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); 3768 if (ret <= 0) { 3769#ifdef EXT4FS_DEBUG 3770 WARN_ON(ret <= 0); 3771 printk(KERN_ERR "%s: ext4_ext_map_blocks " 3772 "returned error inode#%lu, block=%u, " 3773 "max_blocks=%u", __func__, 3774 inode->i_ino, map.m_lblk, max_blocks); 3775#endif 3776 ext4_mark_inode_dirty(handle, inode); 3777 ret2 = ext4_journal_stop(handle); 3778 break; 3779 
} 3780 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 3781 blkbits) >> blkbits)) 3782 new_size = offset + len; 3783 else 3784 new_size = (map.m_lblk + ret) << blkbits; 3785 3786 ext4_falloc_update_inode(inode, mode, new_size, 3787 (map.m_flags & EXT4_MAP_NEW)); 3788 ext4_mark_inode_dirty(handle, inode); 3789 ret2 = ext4_journal_stop(handle); 3790 if (ret2) 3791 break; 3792 } 3793 if (ret == -ENOSPC && 3794 ext4_should_retry_alloc(inode->i_sb, &retries)) { 3795 ret = 0; 3796 goto retry; 3797 } 3798 mutex_unlock(&inode->i_mutex); 3799 trace_ext4_fallocate_exit(inode, offset, max_blocks, 3800 ret > 0 ? ret2 : ret); 3801 return ret > 0 ? ret2 : ret; 3802} 3803 3804/* 3805 * This function convert a range of blocks to written extents 3806 * The caller of this function will pass the start offset and the size. 3807 * all unwritten extents within this range will be converted to 3808 * written extents. 3809 * 3810 * This function is called from the direct IO end io call back 3811 * function, to convert the fallocated extents after IO is completed. 3812 * Returns 0 on success. 3813 */ 3814int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 3815 ssize_t len) 3816{ 3817 handle_t *handle; 3818 unsigned int max_blocks; 3819 int ret = 0; 3820 int ret2 = 0; 3821 struct ext4_map_blocks map; 3822 unsigned int credits, blkbits = inode->i_blkbits; 3823 3824 map.m_lblk = offset >> blkbits; 3825 /* 3826 * We can't just convert len to max_blocks because 3827 * If blocksize = 4096 offset = 3072 and len = 2048 3828 */ 3829 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 3830 map.m_lblk); 3831 /* 3832 * credits to insert 1 extent into extent tree 3833 */ 3834 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3835 while (ret >= 0 && ret < max_blocks) { 3836 map.m_lblk += ret; 3837 map.m_len = (max_blocks -= ret); 3838 handle = ext4_journal_start(inode, credits); 3839 if (IS_ERR(handle)) { 3840 ret = PTR_ERR(handle); 3841 break; 3842 } 3843 ret = ext4_map_blocks(handle, inode, &map, 3844 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 3845 if (ret <= 0) { 3846 WARN_ON(ret <= 0); 3847 printk(KERN_ERR "%s: ext4_ext_map_blocks " 3848 "returned error inode#%lu, block=%u, " 3849 "max_blocks=%u", __func__, 3850 inode->i_ino, map.m_lblk, map.m_len); 3851 } 3852 ext4_mark_inode_dirty(handle, inode); 3853 ret2 = ext4_journal_stop(handle); 3854 if (ret <= 0 || ret2 ) 3855 break; 3856 } 3857 return ret > 0 ? ret2 : ret; 3858} 3859 3860/* 3861 * Callback function called for each extent to gather FIEMAP information. 3862 */ 3863static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3864 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3865 void *data) 3866{ 3867 __u64 logical; 3868 __u64 physical; 3869 __u64 length; 3870 loff_t size; 3871 __u32 flags = 0; 3872 int ret = 0; 3873 struct fiemap_extent_info *fieinfo = data; 3874 unsigned char blksize_bits; 3875 3876 blksize_bits = inode->i_sb->s_blocksize_bits; 3877 logical = (__u64)newex->ec_block << blksize_bits; 3878 3879 if (newex->ec_start == 0) { 3880 /* 3881 * No extent in extent-tree contains block @newex->ec_start, 3882 * then the block may stay in 1)a hole or 2)delayed-extent. 3883 * 3884 * Holes or delayed-extents are processed as follows. 3885 * 1. lookup dirty pages with specified range in pagecache. 3886 * If no page is got, then there is no delayed-extent and 3887 * return with EXT_CONTINUE. 3888 * 2. find the 1st mapped buffer, 3889 * 3. 
check if the mapped buffer is both in the request range 3890 * and a delayed buffer. If not, there is no delayed-extent, 3891 * then return. 3892 * 4. a delayed-extent is found, the extent will be collected. 3893 */ 3894 ext4_lblk_t end = 0; 3895 pgoff_t last_offset; 3896 pgoff_t offset; 3897 pgoff_t index; 3898 pgoff_t start_index = 0; 3899 struct page **pages = NULL; 3900 struct buffer_head *bh = NULL; 3901 struct buffer_head *head = NULL; 3902 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); 3903 3904 pages = kmalloc(PAGE_SIZE, GFP_KERNEL); 3905 if (pages == NULL) 3906 return -ENOMEM; 3907 3908 offset = logical >> PAGE_SHIFT; 3909repeat: 3910 last_offset = offset; 3911 head = NULL; 3912 ret = find_get_pages_tag(inode->i_mapping, &offset, 3913 PAGECACHE_TAG_DIRTY, nr_pages, pages); 3914 3915 if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3916 /* First time, try to find a mapped buffer. */ 3917 if (ret == 0) { 3918out: 3919 for (index = 0; index < ret; index++) 3920 page_cache_release(pages[index]); 3921 /* just a hole. */ 3922 kfree(pages); 3923 return EXT_CONTINUE; 3924 } 3925 index = 0; 3926 3927next_page: 3928 /* Try to find the 1st mapped buffer. */ 3929 end = ((__u64)pages[index]->index << PAGE_SHIFT) >> 3930 blksize_bits; 3931 if (!page_has_buffers(pages[index])) 3932 goto out; 3933 head = page_buffers(pages[index]); 3934 if (!head) 3935 goto out; 3936 3937 index++; 3938 bh = head; 3939 do { 3940 if (end >= newex->ec_block + 3941 newex->ec_len) 3942 /* The buffer is out of 3943 * the request range. 3944 */ 3945 goto out; 3946 3947 if (buffer_mapped(bh) && 3948 end >= newex->ec_block) { 3949 start_index = index - 1; 3950 /* get the 1st mapped buffer. */ 3951 goto found_mapped_buffer; 3952 } 3953 3954 bh = bh->b_this_page; 3955 end++; 3956 } while (bh != head); 3957 3958 /* No mapped buffer in the range found in this page, 3959 * We need to look up next page. 3960 */ 3961 if (index >= ret) { 3962 /* There is no page left, but we need to limit 3963 * newex->ec_len. 3964 */ 3965 newex->ec_len = end - newex->ec_block; 3966 goto out; 3967 } 3968 goto next_page; 3969 } else { 3970 /*Find contiguous delayed buffers. */ 3971 if (ret > 0 && pages[0]->index == last_offset) 3972 head = page_buffers(pages[0]); 3973 bh = head; 3974 index = 1; 3975 start_index = 0; 3976 } 3977 3978found_mapped_buffer: 3979 if (bh != NULL && buffer_delay(bh)) { 3980 /* 1st or contiguous delayed buffer found. */ 3981 if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3982 /* 3983 * 1st delayed buffer found, record 3984 * the start of extent. 3985 */ 3986 flags |= FIEMAP_EXTENT_DELALLOC; 3987 newex->ec_block = end; 3988 logical = (__u64)end << blksize_bits; 3989 } 3990 /* Find contiguous delayed buffers. */ 3991 do { 3992 if (!buffer_delay(bh)) 3993 goto found_delayed_extent; 3994 bh = bh->b_this_page; 3995 end++; 3996 } while (bh != head); 3997 3998 for (; index < ret; index++) { 3999 if (!page_has_buffers(pages[index])) { 4000 bh = NULL; 4001 break; 4002 } 4003 head = page_buffers(pages[index]); 4004 if (!head) { 4005 bh = NULL; 4006 break; 4007 } 4008 4009 if (pages[index]->index != 4010 pages[start_index]->index + index 4011 - start_index) { 4012 /* Blocks are not contiguous. */ 4013 bh = NULL; 4014 break; 4015 } 4016 bh = head; 4017 do { 4018 if (!buffer_delay(bh)) 4019 /* Delayed-extent ends. */ 4020 goto found_delayed_extent; 4021 bh = bh->b_this_page; 4022 end++; 4023 } while (bh != head); 4024 } 4025 } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) 4026 /* a hole found. 
*/ 4027 goto out; 4028 4029found_delayed_extent: 4030 newex->ec_len = min(end - newex->ec_block, 4031 (ext4_lblk_t)EXT_INIT_MAX_LEN); 4032 if (ret == nr_pages && bh != NULL && 4033 newex->ec_len < EXT_INIT_MAX_LEN && 4034 buffer_delay(bh)) { 4035 /* Have not collected an extent and continue. */ 4036 for (index = 0; index < ret; index++) 4037 page_cache_release(pages[index]); 4038 goto repeat; 4039 } 4040 4041 for (index = 0; index < ret; index++) 4042 page_cache_release(pages[index]); 4043 kfree(pages); 4044 } 4045 4046 physical = (__u64)newex->ec_start << blksize_bits; 4047 length = (__u64)newex->ec_len << blksize_bits; 4048 4049 if (ex && ext4_ext_is_uninitialized(ex)) 4050 flags |= FIEMAP_EXTENT_UNWRITTEN; 4051 4052 size = i_size_read(inode); 4053 if (logical + length >= size) 4054 flags |= FIEMAP_EXTENT_LAST; 4055 4056 ret = fiemap_fill_next_extent(fieinfo, logical, physical, 4057 length, flags); 4058 if (ret < 0) 4059 return ret; 4060 if (ret == 1) 4061 return EXT_BREAK; 4062 return EXT_CONTINUE; 4063} 4064 4065/* fiemap flags we can handle specified here */ 4066#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 4067 4068static int ext4_xattr_fiemap(struct inode *inode, 4069 struct fiemap_extent_info *fieinfo) 4070{ 4071 __u64 physical = 0; 4072 __u64 length; 4073 __u32 flags = FIEMAP_EXTENT_LAST; 4074 int blockbits = inode->i_sb->s_blocksize_bits; 4075 int error = 0; 4076 4077 /* in-inode? */ 4078 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4079 struct ext4_iloc iloc; 4080 int offset; /* offset of xattr in inode */ 4081 4082 error = ext4_get_inode_loc(inode, &iloc); 4083 if (error) 4084 return error; 4085 physical = iloc.bh->b_blocknr << blockbits; 4086 offset = EXT4_GOOD_OLD_INODE_SIZE + 4087 EXT4_I(inode)->i_extra_isize; 4088 physical += offset; 4089 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4090 flags |= FIEMAP_EXTENT_DATA_INLINE; 4091 brelse(iloc.bh); 4092 } else { /* external block */ 4093 physical = EXT4_I(inode)->i_file_acl << blockbits; 4094 length = inode->i_sb->s_blocksize; 4095 } 4096 4097 if (physical) 4098 error = fiemap_fill_next_extent(fieinfo, 0, physical, 4099 length, flags); 4100 return (error < 0 ? error : 0); 4101} 4102 4103int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4104 __u64 start, __u64 len) 4105{ 4106 ext4_lblk_t start_blk; 4107 int error = 0; 4108 4109 /* fallback to generic here if not in extents fmt */ 4110 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4111 return generic_block_fiemap(inode, fieinfo, start, len, 4112 ext4_get_block); 4113 4114 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 4115 return -EBADR; 4116 4117 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 4118 error = ext4_xattr_fiemap(inode, fieinfo); 4119 } else { 4120 ext4_lblk_t len_blks; 4121 __u64 last_blk; 4122 4123 start_blk = start >> inode->i_sb->s_blocksize_bits; 4124 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4125 if (last_blk >= EXT_MAX_BLOCK) 4126 last_blk = EXT_MAX_BLOCK-1; 4127 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 4128 4129 /* 4130 * Walk the extent tree gathering extent information. 4131 * ext4_ext_fiemap_cb will push extents back to user. 4132 */ 4133 error = ext4_ext_walk_space(inode, start_blk, len_blks, 4134 ext4_ext_fiemap_cb, fieinfo); 4135 } 4136 4137 return error; 4138} 4139