extents.c revision d100eef2440fea13e4f09e88b1c8bcbca64beb9f
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct ext4_ext_cache *newex);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
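/*
 * Typical usage of the two helpers above, following the usual jbd2
 * pattern seen throughout this file: take write access to the node,
 * modify it, then record the change in the running transaction.
 * A minimal sketch:
 *
 *	err = ext4_ext_get_access(handle, inode, path + depth);
 *	if (!err) {
 *		le16_add_cpu(&path[depth].p_hdr->eh_entries, -1);
 *		err = ext4_ext_dirty(handle, inode, path + depth);
 *	}
 */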
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object's sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
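/*
 * Worked example for the bookkeeping above, assuming a 4KB block size:
 * struct ext4_extent_header and struct ext4_extent_idx are both 12 bytes,
 * so idxs = (4096 - 12) / 12 = 340.  A contiguous delalloc run therefore
 * charges one extra metadata block every 340 blocks, a second one every
 * 340^2 = 115600 blocks, and a third one every 340^3 blocks.
 */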
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}
static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
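/*
 * Illustration of the "closest" semantics used by the two binary searches
 * below: with index entries starting at logical blocks {0, 100, 200}, a
 * lookup for block 150 leaves p_idx (or p_ext for the leaf search) pointing
 * at the entry that starts at 100, i.e. the rightmost entry whose start is
 * less than or equal to the requested block.
 */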
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}
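/*
 * The path returned by ext4_ext_find_extent() below is an array with one
 * entry per tree level: path[0] describes the root node held in the inode
 * body and path[depth] the leaf containing (or nearest to) the requested
 * block.  depth + 2 entries are allocated so that the same array can be
 * reused after the tree grows by one level.
 */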
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto err;
		}
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			ret = bh_submit_read(bh);
			if (ret < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EIO;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		ret = ext4_ext_check_block(inode, eh, i, bh);
		if (ret < 0)
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;
	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
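/*
 * Sketch of the transformation performed by ext4_ext_grow_indepth()
 * below.  Before: the root node lives in the inode body at depth N.
 * After: the old root's entries have been copied into a freshly
 * allocated block, and the inode body holds a one-entry index pointing
 * at that block, so the tree is now depth N + 1:
 *
 *	inode body [root, depth N]        inode body [1 index, depth N+1]
 *	         |                   ->              |
 *	        ...                       new block [old root's entries]
 */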
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check_block(inode, eh,
					 path->p_depth - depth, bh)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent with its neighbours in
 * the tree; the left neighbour is tried first, then the right one.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 &= ~(sbi->s_cluster_ratio - 1);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);


	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
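/*
 * Summary of the insertion strategy implemented above: first try to widen
 * an adjacent extent that the new one can merge into (e.g. an extent
 * {lblk 0, len 100, pblk 500} absorbs {lblk 100, len 50, pblk 600}, since
 * both the logical and the physical ranges touch); failing that, look for
 * room in the current leaf, then in the next leaf; as a last resort grow
 * the tree via ext4_ext_create_new_leaf().  Once the entry is placed,
 * neighbours are re-merged and the index entries above are corrected.
 */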
ext4_lblk_t num, 1972 struct fiemap_extent_info *fieinfo) 1973{ 1974 struct ext4_ext_path *path = NULL; 1975 struct ext4_ext_cache newex; 1976 struct ext4_extent *ex; 1977 ext4_lblk_t next, next_del, start = 0, end = 0; 1978 ext4_lblk_t last = block + num; 1979 int exists, depth = 0, err = 0; 1980 unsigned int flags = 0; 1981 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 1982 1983 while (block < last && block != EXT_MAX_BLOCKS) { 1984 num = last - block; 1985 /* find extent for this block */ 1986 down_read(&EXT4_I(inode)->i_data_sem); 1987 1988 if (path && ext_depth(inode) != depth) { 1989 /* depth was changed. we have to realloc path */ 1990 kfree(path); 1991 path = NULL; 1992 } 1993 1994 path = ext4_ext_find_extent(inode, block, path); 1995 if (IS_ERR(path)) { 1996 up_read(&EXT4_I(inode)->i_data_sem); 1997 err = PTR_ERR(path); 1998 path = NULL; 1999 break; 2000 } 2001 2002 depth = ext_depth(inode); 2003 if (unlikely(path[depth].p_hdr == NULL)) { 2004 up_read(&EXT4_I(inode)->i_data_sem); 2005 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2006 err = -EIO; 2007 break; 2008 } 2009 ex = path[depth].p_ext; 2010 next = ext4_ext_next_allocated_block(path); 2011 ext4_ext_drop_refs(path); 2012 2013 flags = 0; 2014 exists = 0; 2015 if (!ex) { 2016 /* there is no extent yet, so try to allocate 2017 * all requested space */ 2018 start = block; 2019 end = block + num; 2020 } else if (le32_to_cpu(ex->ee_block) > block) { 2021 /* need to allocate space before found extent */ 2022 start = block; 2023 end = le32_to_cpu(ex->ee_block); 2024 if (block + num < end) 2025 end = block + num; 2026 } else if (block >= le32_to_cpu(ex->ee_block) 2027 + ext4_ext_get_actual_len(ex)) { 2028 /* need to allocate space after found extent */ 2029 start = block; 2030 end = block + num; 2031 if (end >= next) 2032 end = next; 2033 } else if (block >= le32_to_cpu(ex->ee_block)) { 2034 /* 2035 * some part of requested space is covered 2036 * by found extent 2037 */ 2038 start = block; 2039 end = le32_to_cpu(ex->ee_block) 2040 + ext4_ext_get_actual_len(ex); 2041 if (block + num < end) 2042 end = block + num; 2043 exists = 1; 2044 } else { 2045 BUG(); 2046 } 2047 BUG_ON(end <= start); 2048 2049 if (!exists) { 2050 newex.ec_block = start; 2051 newex.ec_len = end - start; 2052 newex.ec_start = 0; 2053 } else { 2054 newex.ec_block = le32_to_cpu(ex->ee_block); 2055 newex.ec_len = ext4_ext_get_actual_len(ex); 2056 newex.ec_start = ext4_ext_pblock(ex); 2057 if (ext4_ext_is_uninitialized(ex)) 2058 flags |= FIEMAP_EXTENT_UNWRITTEN; 2059 } 2060 2061 /* 2062 * Find delayed extent and update newex accordingly. We call 2063 * it even in !exists case to find out whether newex is the 2064 * last existing extent or not. 2065 */ 2066 next_del = ext4_find_delayed_extent(inode, &newex); 2067 if (!exists && next_del) { 2068 exists = 1; 2069 flags |= FIEMAP_EXTENT_DELALLOC; 2070 } 2071 up_read(&EXT4_I(inode)->i_data_sem); 2072 2073 if (unlikely(newex.ec_len == 0)) { 2074 EXT4_ERROR_INODE(inode, "newex.ec_len == 0"); 2075 err = -EIO; 2076 break; 2077 } 2078 2079 /* 2080 * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2081 * we need to check next == EXT_MAX_BLOCKS because it is 2082 * possible that an extent is with unwritten and delayed 2083 * status due to when an extent is delayed allocated and 2084 * is allocated by fallocate status tree will track both of 2085 * them in a extent. 2086 * 2087 * So we could return a unwritten and delayed extent, and 2088 * its block is equal to 'next'. 
2089 */ 2090 if (next == next_del && next == EXT_MAX_BLOCKS) { 2091 flags |= FIEMAP_EXTENT_LAST; 2092 if (unlikely(next_del != EXT_MAX_BLOCKS || 2093 next != EXT_MAX_BLOCKS)) { 2094 EXT4_ERROR_INODE(inode, 2095 "next extent == %u, next " 2096 "delalloc extent = %u", 2097 next, next_del); 2098 err = -EIO; 2099 break; 2100 } 2101 } 2102 2103 if (exists) { 2104 err = fiemap_fill_next_extent(fieinfo, 2105 (__u64)newex.ec_block << blksize_bits, 2106 (__u64)newex.ec_start << blksize_bits, 2107 (__u64)newex.ec_len << blksize_bits, 2108 flags); 2109 if (err < 0) 2110 break; 2111 if (err == 1) { 2112 err = 0; 2113 break; 2114 } 2115 } 2116 2117 block = newex.ec_block + newex.ec_len; 2118 } 2119 2120 if (path) { 2121 ext4_ext_drop_refs(path); 2122 kfree(path); 2123 } 2124 2125 return err; 2126} 2127 2128static void 2129ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, 2130 __u32 len, ext4_fsblk_t start) 2131{ 2132 struct ext4_ext_cache *cex; 2133 BUG_ON(len == 0); 2134 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2135 trace_ext4_ext_put_in_cache(inode, block, len, start); 2136 cex = &EXT4_I(inode)->i_cached_extent; 2137 cex->ec_block = block; 2138 cex->ec_len = len; 2139 cex->ec_start = start; 2140 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 2141} 2142 2143/* 2144 * ext4_ext_put_gap_in_cache: 2145 * calculate boundaries of the gap that the requested block fits into 2146 * and cache this gap 2147 */ 2148static void 2149ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 2150 ext4_lblk_t block) 2151{ 2152 int depth = ext_depth(inode); 2153 unsigned long len; 2154 ext4_lblk_t lblock; 2155 struct ext4_extent *ex; 2156 2157 ex = path[depth].p_ext; 2158 if (ex == NULL) { 2159 /* there is no extent yet, so gap is [0;-] */ 2160 lblock = 0; 2161 len = EXT_MAX_BLOCKS; 2162 ext_debug("cache gap(whole file):"); 2163 } else if (block < le32_to_cpu(ex->ee_block)) { 2164 lblock = block; 2165 len = le32_to_cpu(ex->ee_block) - block; 2166 ext_debug("cache gap(before): %u [%u:%u]", 2167 block, 2168 le32_to_cpu(ex->ee_block), 2169 ext4_ext_get_actual_len(ex)); 2170 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2171 ext4_es_insert_extent(inode, lblock, len, ~0, 2172 EXTENT_STATUS_HOLE); 2173 } else if (block >= le32_to_cpu(ex->ee_block) 2174 + ext4_ext_get_actual_len(ex)) { 2175 ext4_lblk_t next; 2176 lblock = le32_to_cpu(ex->ee_block) 2177 + ext4_ext_get_actual_len(ex); 2178 2179 next = ext4_ext_next_allocated_block(path); 2180 ext_debug("cache gap(after): [%u:%u] %u", 2181 le32_to_cpu(ex->ee_block), 2182 ext4_ext_get_actual_len(ex), 2183 block); 2184 BUG_ON(next == lblock); 2185 len = next - lblock; 2186 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2187 ext4_es_insert_extent(inode, lblock, len, ~0, 2188 EXTENT_STATUS_HOLE); 2189 } else { 2190 lblock = len = 0; 2191 BUG(); 2192 } 2193 2194 ext_debug(" -> %u:%lu\n", lblock, len); 2195 ext4_ext_put_in_cache(inode, lblock, len, 0); 2196} 2197 2198/* 2199 * ext4_ext_in_cache() 2200 * Checks to see if the given block is in the cache. 2201 * If it is, the cached extent is stored in the given 2202 * cache extent pointer. 
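* (Editorial example, not in the original source: after a call such as * ext4_ext_put_in_cache(inode, 100, 50, pblk), a lookup for block 120 * copies the cached extent [100..149] into *ex and returns 1, while a * lookup for block 150 misses and returns 0.)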
2203 * 2204 * @inode: The files inode 2205 * @block: The block to look for in the cache 2206 * @ex: Pointer where the cached extent will be stored 2207 * if it contains block 2208 * 2209 * Return 0 if cache is invalid; 1 if the cache is valid 2210 */ 2211static int 2212ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 2213 struct ext4_extent *ex) 2214{ 2215 struct ext4_ext_cache *cex; 2216 int ret = 0; 2217 2218 /* 2219 * We borrow i_block_reservation_lock to protect i_cached_extent 2220 */ 2221 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2222 cex = &EXT4_I(inode)->i_cached_extent; 2223 2224 /* has cache valid data? */ 2225 if (cex->ec_len == 0) 2226 goto errout; 2227 2228 if (in_range(block, cex->ec_block, cex->ec_len)) { 2229 ex->ee_block = cpu_to_le32(cex->ec_block); 2230 ext4_ext_store_pblock(ex, cex->ec_start); 2231 ex->ee_len = cpu_to_le16(cex->ec_len); 2232 ext_debug("%u cached by %u:%u:%llu\n", 2233 block, 2234 cex->ec_block, cex->ec_len, cex->ec_start); 2235 ret = 1; 2236 } 2237errout: 2238 trace_ext4_ext_in_cache(inode, block, ret); 2239 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 2240 return ret; 2241} 2242 2243/* 2244 * ext4_ext_rm_idx: 2245 * removes index from the index block. 2246 */ 2247static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2248 struct ext4_ext_path *path, int depth) 2249{ 2250 int err; 2251 ext4_fsblk_t leaf; 2252 2253 /* free index block */ 2254 depth--; 2255 path = path + depth; 2256 leaf = ext4_idx_pblock(path->p_idx); 2257 if (unlikely(path->p_hdr->eh_entries == 0)) { 2258 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2259 return -EIO; 2260 } 2261 err = ext4_ext_get_access(handle, inode, path); 2262 if (err) 2263 return err; 2264 2265 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 2266 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 2267 len *= sizeof(struct ext4_extent_idx); 2268 memmove(path->p_idx, path->p_idx + 1, len); 2269 } 2270 2271 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2272 err = ext4_ext_dirty(handle, inode, path); 2273 if (err) 2274 return err; 2275 ext_debug("index is empty, remove it, free block %llu\n", leaf); 2276 trace_ext4_ext_rm_idx(inode, leaf); 2277 2278 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2279 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2280 2281 while (--depth >= 0) { 2282 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2283 break; 2284 path--; 2285 err = ext4_ext_get_access(handle, inode, path); 2286 if (err) 2287 break; 2288 path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2289 err = ext4_ext_dirty(handle, inode, path); 2290 if (err) 2291 break; 2292 } 2293 return err; 2294} 2295 2296/* 2297 * ext4_ext_calc_credits_for_single_extent: 2298 * This routine returns max. credits that needed to insert an extent 2299 * to the extent tree. 2300 * When pass the actual path, the caller should calculate credits 2301 * under i_data_sem. 2302 */ 2303int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2304 struct ext4_ext_path *path) 2305{ 2306 if (path) { 2307 int depth = ext_depth(inode); 2308 int ret = 0; 2309 2310 /* probably there is space in leaf? */ 2311 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2312 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2313 2314 /* 2315 * There are some space in the leaf tree, no 2316 * need to account for leaf block credit 2317 * 2318 * bitmaps and block group descriptor blocks 2319 * and other metadata blocks still need to be 2320 * accounted. 
2321 */ 2322 /* 1 bitmap, 1 block group descriptor */ 2323 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 2324 return ret; 2325 } 2326 } 2327 2328 return ext4_chunk_trans_blocks(inode, nrblocks); 2329} 2330 2331/* 2332 * How many index/leaf blocks need to change/allocate to modify nrblocks? 2333 * 2334 * if nrblocks are fit in a single extent (chunk flag is 1), then 2335 * in the worse case, each tree level index/leaf need to be changed 2336 * if the tree split due to insert a new extent, then the old tree 2337 * index/leaf need to be updated too 2338 * 2339 * If the nrblocks are discontiguous, they could cause 2340 * the whole tree split more than once, but this is really rare. 2341 */ 2342int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2343{ 2344 int index; 2345 int depth; 2346 2347 /* If we are converting the inline data, only one is needed here. */ 2348 if (ext4_has_inline_data(inode)) 2349 return 1; 2350 2351 depth = ext_depth(inode); 2352 2353 if (chunk) 2354 index = depth * 2; 2355 else 2356 index = depth * 3; 2357 2358 return index; 2359} 2360 2361static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2362 struct ext4_extent *ex, 2363 ext4_fsblk_t *partial_cluster, 2364 ext4_lblk_t from, ext4_lblk_t to) 2365{ 2366 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2367 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2368 ext4_fsblk_t pblk; 2369 int flags = 0; 2370 2371 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2372 flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2373 else if (ext4_should_journal_data(inode)) 2374 flags |= EXT4_FREE_BLOCKS_FORGET; 2375 2376 /* 2377 * For bigalloc file systems, we never free a partial cluster 2378 * at the beginning of the extent. Instead, we make a note 2379 * that we tried freeing the cluster, and check to see if we 2380 * need to free it on a subsequent call to ext4_remove_blocks, 2381 * or at the end of the ext4_truncate() operation. 2382 */ 2383 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 2384 2385 trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 2386 /* 2387 * If we have a partial cluster, and it's different from the 2388 * cluster of the last block, we need to explicitly free the 2389 * partial cluster here. 
2390 */ 2391 pblk = ext4_ext_pblock(ex) + ee_len - 1; 2392 if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 2393 ext4_free_blocks(handle, inode, NULL, 2394 EXT4_C2B(sbi, *partial_cluster), 2395 sbi->s_cluster_ratio, flags); 2396 *partial_cluster = 0; 2397 } 2398 2399#ifdef EXTENTS_STATS 2400 { 2401 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2402 spin_lock(&sbi->s_ext_stats_lock); 2403 sbi->s_ext_blocks += ee_len; 2404 sbi->s_ext_extents++; 2405 if (ee_len < sbi->s_ext_min) 2406 sbi->s_ext_min = ee_len; 2407 if (ee_len > sbi->s_ext_max) 2408 sbi->s_ext_max = ee_len; 2409 if (ext_depth(inode) > sbi->s_depth_max) 2410 sbi->s_depth_max = ext_depth(inode); 2411 spin_unlock(&sbi->s_ext_stats_lock); 2412 } 2413#endif 2414 if (from >= le32_to_cpu(ex->ee_block) 2415 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2416 /* tail removal */ 2417 ext4_lblk_t num; 2418 2419 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2420 pblk = ext4_ext_pblock(ex) + ee_len - num; 2421 ext_debug("free last %u blocks starting %llu\n", num, pblk); 2422 ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 2423 /* 2424 * If the block range to be freed didn't start at the 2425 * beginning of a cluster, and we removed the entire 2426 * extent, save the partial cluster here, since we 2427 * might need to delete if we determine that the 2428 * truncate operation has removed all of the blocks in 2429 * the cluster. 2430 */ 2431 if (pblk & (sbi->s_cluster_ratio - 1) && 2432 (ee_len == num)) 2433 *partial_cluster = EXT4_B2C(sbi, pblk); 2434 else 2435 *partial_cluster = 0; 2436 } else if (from == le32_to_cpu(ex->ee_block) 2437 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2438 /* head removal */ 2439 ext4_lblk_t num; 2440 ext4_fsblk_t start; 2441 2442 num = to - from; 2443 start = ext4_ext_pblock(ex); 2444 2445 ext_debug("free first %u blocks starting %llu\n", num, start); 2446 ext4_free_blocks(handle, inode, NULL, start, num, flags); 2447 2448 } else { 2449 printk(KERN_INFO "strange request: removal(2) " 2450 "%u-%u from %u:%u\n", 2451 from, to, le32_to_cpu(ex->ee_block), ee_len); 2452 } 2453 return 0; 2454} 2455 2456 2457/* 2458 * ext4_ext_rm_leaf() Removes the extents associated with the 2459 * blocks appearing between "start" and "end", and splits the extents 2460 * if "start" and "end" appear in the same extent 2461 * 2462 * @handle: The journal handle 2463 * @inode: The files inode 2464 * @path: The path to the leaf 2465 * @start: The first block to remove 2466 * @end: The last block to remove 2467 */ 2468static int 2469ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2470 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, 2471 ext4_lblk_t start, ext4_lblk_t end) 2472{ 2473 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2474 int err = 0, correct_index = 0; 2475 int depth = ext_depth(inode), credits; 2476 struct ext4_extent_header *eh; 2477 ext4_lblk_t a, b; 2478 unsigned num; 2479 ext4_lblk_t ex_ee_block; 2480 unsigned short ex_ee_len; 2481 unsigned uninitialized = 0; 2482 struct ext4_extent *ex; 2483 2484 /* the header must be checked already in ext4_ext_remove_space() */ 2485 ext_debug("truncate since %u in leaf to %u\n", start, end); 2486 if (!path[depth].p_hdr) 2487 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2488 eh = path[depth].p_hdr; 2489 if (unlikely(path[depth].p_hdr == NULL)) { 2490 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2491 return -EIO; 2492 } 2493 /* find where to start removing */ 2494 ex = EXT_LAST_EXTENT(eh); 2495 2496 
ex_ee_block = le32_to_cpu(ex->ee_block); 2497 ex_ee_len = ext4_ext_get_actual_len(ex); 2498 2499 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2500 2501 while (ex >= EXT_FIRST_EXTENT(eh) && 2502 ex_ee_block + ex_ee_len > start) { 2503 2504 if (ext4_ext_is_uninitialized(ex)) 2505 uninitialized = 1; 2506 else 2507 uninitialized = 0; 2508 2509 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2510 uninitialized, ex_ee_len); 2511 path[depth].p_ext = ex; 2512 2513 a = ex_ee_block > start ? ex_ee_block : start; 2514 b = ex_ee_block+ex_ee_len - 1 < end ? 2515 ex_ee_block+ex_ee_len - 1 : end; 2516 2517 ext_debug(" border %u:%u\n", a, b); 2518 2519 /* If this extent is beyond the end of the hole, skip it */ 2520 if (end < ex_ee_block) { 2521 ex--; 2522 ex_ee_block = le32_to_cpu(ex->ee_block); 2523 ex_ee_len = ext4_ext_get_actual_len(ex); 2524 continue; 2525 } else if (b != ex_ee_block + ex_ee_len - 1) { 2526 EXT4_ERROR_INODE(inode, 2527 "can not handle truncate %u:%u " 2528 "on extent %u:%u", 2529 start, end, ex_ee_block, 2530 ex_ee_block + ex_ee_len - 1); 2531 err = -EIO; 2532 goto out; 2533 } else if (a != ex_ee_block) { 2534 /* remove tail of the extent */ 2535 num = a - ex_ee_block; 2536 } else { 2537 /* remove whole extent: excellent! */ 2538 num = 0; 2539 } 2540 /* 2541 * 3 for leaf, sb, and inode plus 2 (bmap and group 2542 * descriptor) for each block group; assume two block 2543 * groups plus ex_ee_len/blocks_per_block_group for 2544 * the worst case 2545 */ 2546 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2547 if (ex == EXT_FIRST_EXTENT(eh)) { 2548 correct_index = 1; 2549 credits += (ext_depth(inode)) + 1; 2550 } 2551 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2552 2553 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 2554 if (err) 2555 goto out; 2556 2557 err = ext4_ext_get_access(handle, inode, path + depth); 2558 if (err) 2559 goto out; 2560 2561 err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 2562 a, b); 2563 if (err) 2564 goto out; 2565 2566 if (num == 0) 2567 /* this extent is removed; mark slot entirely unused */ 2568 ext4_ext_store_pblock(ex, 0); 2569 2570 ex->ee_len = cpu_to_le16(num); 2571 /* 2572 * Do not mark uninitialized if all the blocks in the 2573 * extent have been removed. 2574 */ 2575 if (uninitialized && num) 2576 ext4_ext_mark_uninitialized(ex); 2577 /* 2578 * If the extent was completely released, 2579 * we need to remove it from the leaf 2580 */ 2581 if (num == 0) { 2582 if (end != EXT_MAX_BLOCKS - 1) { 2583 /* 2584 * For hole punching, we need to scoot all the 2585 * extents up when an extent is removed so that 2586 * we don't have blank extents in the middle 2587 */ 2588 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2589 sizeof(struct ext4_extent)); 2590 2591 /* Now get rid of the one at the end */ 2592 memset(EXT_LAST_EXTENT(eh), 0, 2593 sizeof(struct ext4_extent)); 2594 } 2595 le16_add_cpu(&eh->eh_entries, -1); 2596 } else 2597 *partial_cluster = 0; 2598 2599 err = ext4_ext_dirty(handle, inode, path + depth); 2600 if (err) 2601 goto out; 2602 2603 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2604 ext4_ext_pblock(ex)); 2605 ex--; 2606 ex_ee_block = le32_to_cpu(ex->ee_block); 2607 ex_ee_len = ext4_ext_get_actual_len(ex); 2608 } 2609 2610 if (correct_index && eh->eh_entries) 2611 err = ext4_ext_correct_indexes(handle, inode, path); 2612 2613 /* 2614 * If there is still an entry in the leaf node, check to see if 2615 * it references the partial cluster.
This is the only place 2616 * where it could; if it doesn't, we can free the cluster. 2617 */ 2618 if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && 2619 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 2620 *partial_cluster)) { 2621 int flags = EXT4_FREE_BLOCKS_FORGET; 2622 2623 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2624 flags |= EXT4_FREE_BLOCKS_METADATA; 2625 2626 ext4_free_blocks(handle, inode, NULL, 2627 EXT4_C2B(sbi, *partial_cluster), 2628 sbi->s_cluster_ratio, flags); 2629 *partial_cluster = 0; 2630 } 2631 2632 /* if this leaf is free, then we should 2633 * remove it from index block above */ 2634 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2635 err = ext4_ext_rm_idx(handle, inode, path, depth); 2636 2637out: 2638 return err; 2639} 2640 2641/* 2642 * ext4_ext_more_to_rm: 2643 * returns 1 if current index has to be freed (even partial) 2644 */ 2645static int 2646ext4_ext_more_to_rm(struct ext4_ext_path *path) 2647{ 2648 BUG_ON(path->p_idx == NULL); 2649 2650 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2651 return 0; 2652 2653 /* 2654 * if truncate on deeper level happened, it wasn't partial, 2655 * so we have to consider current index for truncation 2656 */ 2657 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2658 return 0; 2659 return 1; 2660} 2661 2662static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2663 ext4_lblk_t end) 2664{ 2665 struct super_block *sb = inode->i_sb; 2666 int depth = ext_depth(inode); 2667 struct ext4_ext_path *path = NULL; 2668 ext4_fsblk_t partial_cluster = 0; 2669 handle_t *handle; 2670 int i = 0, err = 0; 2671 2672 ext_debug("truncate since %u to %u\n", start, end); 2673 2674 /* probably first extent we're gonna free will be last in block */ 2675 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2676 if (IS_ERR(handle)) 2677 return PTR_ERR(handle); 2678 2679again: 2680 ext4_ext_invalidate_cache(inode); 2681 2682 trace_ext4_ext_remove_space(inode, start, depth); 2683 2684 /* 2685 * Check if we are removing extents inside the extent tree. If that 2686 * is the case, we are going to punch a hole inside the extent tree 2687 * so we have to check whether we need to split the extent covering 2688 * the last block to remove so we can easily remove the part of it 2689 * in ext4_ext_rm_leaf(). 2690 */ 2691 if (end < EXT_MAX_BLOCKS - 1) { 2692 struct ext4_extent *ex; 2693 ext4_lblk_t ee_block; 2694 2695 /* find extent for this block */ 2696 path = ext4_ext_find_extent(inode, end, NULL); 2697 if (IS_ERR(path)) { 2698 ext4_journal_stop(handle); 2699 return PTR_ERR(path); 2700 } 2701 depth = ext_depth(inode); 2702 /* Leaf may not exist only if inode has no blocks at all */ 2703 ex = path[depth].p_ext; 2704 if (!ex) { 2705 if (depth) { 2706 EXT4_ERROR_INODE(inode, 2707 "path[%d].p_hdr == NULL", 2708 depth); 2709 err = -EIO; 2710 } 2711 goto out; 2712 } 2713 2714 ee_block = le32_to_cpu(ex->ee_block); 2715 2716 /* 2717 * See if the last block is inside the extent, if so split 2718 * the extent at 'end' block so we can easily remove the 2719 * tail of the first part of the split extent in 2720 * ext4_ext_rm_leaf().
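* (Editorial example: punching out blocks 10..20 of an extent covering * 0..99 splits it at block 21 into [0..20] and [21..99]; * ext4_ext_rm_leaf() can then trim the first piece down to [0..9] * without touching the second.)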
2721 */ 2722 if (end >= ee_block && 2723 end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 2724 int split_flag = 0; 2725 2726 if (ext4_ext_is_uninitialized(ex)) 2727 split_flag = EXT4_EXT_MARK_UNINIT1 | 2728 EXT4_EXT_MARK_UNINIT2; 2729 2730 /* 2731 * Split the extent in two so that 'end' is the last 2732 * block in the first new extent 2733 */ 2734 err = ext4_split_extent_at(handle, inode, path, 2735 end + 1, split_flag, 2736 EXT4_GET_BLOCKS_PRE_IO | 2737 EXT4_GET_BLOCKS_PUNCH_OUT_EXT); 2738 2739 if (err < 0) 2740 goto out; 2741 } 2742 } 2743 /* 2744 * We start scanning from right side, freeing all the blocks 2745 * after i_size and walking into the tree depth-wise. 2746 */ 2747 depth = ext_depth(inode); 2748 if (path) { 2749 int k = i = depth; 2750 while (--k > 0) 2751 path[k].p_block = 2752 le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2753 } else { 2754 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2755 GFP_NOFS); 2756 if (path == NULL) { 2757 ext4_journal_stop(handle); 2758 return -ENOMEM; 2759 } 2760 path[0].p_depth = depth; 2761 path[0].p_hdr = ext_inode_hdr(inode); 2762 i = 0; 2763 2764 if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2765 err = -EIO; 2766 goto out; 2767 } 2768 } 2769 err = 0; 2770 2771 while (i >= 0 && err == 0) { 2772 if (i == depth) { 2773 /* this is leaf block */ 2774 err = ext4_ext_rm_leaf(handle, inode, path, 2775 &partial_cluster, start, 2776 end); 2777 /* root level has p_bh == NULL, brelse() eats this */ 2778 brelse(path[i].p_bh); 2779 path[i].p_bh = NULL; 2780 i--; 2781 continue; 2782 } 2783 2784 /* this is index block */ 2785 if (!path[i].p_hdr) { 2786 ext_debug("initialize header\n"); 2787 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2788 } 2789 2790 if (!path[i].p_idx) { 2791 /* this level hasn't been touched yet */ 2792 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2793 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2794 ext_debug("init index ptr: hdr 0x%p, num %d\n", 2795 path[i].p_hdr, 2796 le16_to_cpu(path[i].p_hdr->eh_entries)); 2797 } else { 2798 /* we were already here, see at next index */ 2799 path[i].p_idx--; 2800 } 2801 2802 ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2803 i, EXT_FIRST_INDEX(path[i].p_hdr), 2804 path[i].p_idx); 2805 if (ext4_ext_more_to_rm(path + i)) { 2806 struct buffer_head *bh; 2807 /* go to the next level */ 2808 ext_debug("move to level %d (block %llu)\n", 2809 i + 1, ext4_idx_pblock(path[i].p_idx)); 2810 memset(path + i + 1, 0, sizeof(*path)); 2811 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2812 if (!bh) { 2813 /* should we reset i_size? 
*/ 2814 err = -EIO; 2815 break; 2816 } 2817 if (WARN_ON(i + 1 > depth)) { 2818 err = -EIO; 2819 break; 2820 } 2821 if (ext4_ext_check_block(inode, ext_block_hdr(bh), 2822 depth - i - 1, bh)) { 2823 err = -EIO; 2824 break; 2825 } 2826 path[i + 1].p_bh = bh; 2827 2828 /* save actual number of indexes since this 2829 * number is changed at the next iteration */ 2830 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2831 i++; 2832 } else { 2833 /* we finished processing this index, go up */ 2834 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2835 /* index is empty, remove it; 2836 * handle must be already prepared by the 2837 * truncatei_leaf() */ 2838 err = ext4_ext_rm_idx(handle, inode, path, i); 2839 } 2840 /* root level has p_bh == NULL, brelse() eats this */ 2841 brelse(path[i].p_bh); 2842 path[i].p_bh = NULL; 2843 i--; 2844 ext_debug("return to level %d\n", i); 2845 } 2846 } 2847 2848 trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, 2849 path->p_hdr->eh_entries); 2850 2851 /* If we still have something in the partial cluster and we have removed 2852 * even the first extent, then we should free the blocks in the partial 2853 * cluster as well. */ 2854 if (partial_cluster && path->p_hdr->eh_entries == 0) { 2855 int flags = EXT4_FREE_BLOCKS_FORGET; 2856 2857 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2858 flags |= EXT4_FREE_BLOCKS_METADATA; 2859 2860 ext4_free_blocks(handle, inode, NULL, 2861 EXT4_C2B(EXT4_SB(sb), partial_cluster), 2862 EXT4_SB(sb)->s_cluster_ratio, flags); 2863 partial_cluster = 0; 2864 } 2865 2866 /* TODO: flexible tree reduction should be here */ 2867 if (path->p_hdr->eh_entries == 0) { 2868 /* 2869 * truncate to zero freed all the tree, 2870 * so we need to correct eh_depth 2871 */ 2872 err = ext4_ext_get_access(handle, inode, path); 2873 if (err == 0) { 2874 ext_inode_hdr(inode)->eh_depth = 0; 2875 ext_inode_hdr(inode)->eh_max = 2876 cpu_to_le16(ext4_ext_space_root(inode, 0)); 2877 err = ext4_ext_dirty(handle, inode, path); 2878 } 2879 } 2880out: 2881 ext4_ext_drop_refs(path); 2882 kfree(path); 2883 if (err == -EAGAIN) { 2884 path = NULL; 2885 goto again; 2886 } 2887 ext4_journal_stop(handle); 2888 2889 return err; 2890} 2891 2892/* 2893 * called at mount time 2894 */ 2895void ext4_ext_init(struct super_block *sb) 2896{ 2897 /* 2898 * possible initialization would be here 2899 */ 2900 2901 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 2902#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 2903 printk(KERN_INFO "EXT4-fs: file extents enabled" 2904#ifdef AGGRESSIVE_TEST 2905 ", aggressive tests" 2906#endif 2907#ifdef CHECK_BINSEARCH 2908 ", check binsearch" 2909#endif 2910#ifdef EXTENTS_STATS 2911 ", stats" 2912#endif 2913 "\n"); 2914#endif 2915#ifdef EXTENTS_STATS 2916 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2917 EXT4_SB(sb)->s_ext_min = 1 << 30; 2918 EXT4_SB(sb)->s_ext_max = 0; 2919#endif 2920 } 2921} 2922 2923/* 2924 * called at umount time 2925 */ 2926void ext4_ext_release(struct super_block *sb) 2927{ 2928 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2929 return; 2930 2931#ifdef EXTENTS_STATS 2932 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2933 struct ext4_sb_info *sbi = EXT4_SB(sb); 2934 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2935 sbi->s_ext_blocks, sbi->s_ext_extents, 2936 sbi->s_ext_blocks / sbi->s_ext_extents); 2937 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2938 
sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2939 } 2940#endif 2941} 2942 2943/* FIXME!! we need to try to merge to left or right after zero-out */ 2944static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2945{ 2946 ext4_fsblk_t ee_pblock; 2947 unsigned int ee_len; 2948 int ret; 2949 2950 ee_len = ext4_ext_get_actual_len(ex); 2951 ee_pblock = ext4_ext_pblock(ex); 2952 2953 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 2954 if (ret > 0) 2955 ret = 0; 2956 2957 return ret; 2958} 2959 2960/* 2961 * ext4_split_extent_at() splits an extent at a given block. 2962 * 2963 * @handle: the journal handle 2964 * @inode: the file inode 2965 * @path: the path to the extent 2966 * @split: the logical block where the extent is split. 2967 * @split_flag: indicates if the extent could be zeroed out if the split fails, and 2968 * the states (init or uninit) of the new extents. 2969 * @flags: flags used to insert the new extent into the extent tree. 2970 * 2971 * 2972 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 2973 * of which are determined by split_flag. 2974 * 2975 * There are two cases: 2976 * a> the extent is split into two extents. 2977 * b> a split is not needed, and we just mark the extent. 2978 * 2979 * return 0 on success. 2980 */ 2981static int ext4_split_extent_at(handle_t *handle, 2982 struct inode *inode, 2983 struct ext4_ext_path *path, 2984 ext4_lblk_t split, 2985 int split_flag, 2986 int flags) 2987{ 2988 ext4_fsblk_t newblock; 2989 ext4_lblk_t ee_block; 2990 struct ext4_extent *ex, newex, orig_ex; 2991 struct ext4_extent *ex2 = NULL; 2992 unsigned int ee_len, depth; 2993 int err = 0; 2994 2995 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 2996 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 2997 2998 ext_debug("ext4_split_extent_at: inode %lu, logical" 2999 " block %llu\n", inode->i_ino, (unsigned long long)split); 3000 3001 ext4_ext_show_leaf(inode, path); 3002 3003 depth = ext_depth(inode); 3004 ex = path[depth].p_ext; 3005 ee_block = le32_to_cpu(ex->ee_block); 3006 ee_len = ext4_ext_get_actual_len(ex); 3007 newblock = split - ee_block + ext4_ext_pblock(ex); 3008 3009 BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3010 3011 err = ext4_ext_get_access(handle, inode, path + depth); 3012 if (err) 3013 goto out; 3014 3015 if (split == ee_block) { 3016 /* 3017 * case b: block @split is the block that the extent begins with; 3018 * then we just change the state of the extent, and splitting 3019 * is not needed.
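* (Editorial example: splitting extent [100..199] at block 100 with * EXT4_EXT_MARK_UNINIT2 set only flips the extent's state, while * splitting at block 150 produces the two extents [100..149] and * [150..199].)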
3020 */ 3021 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3022 ext4_ext_mark_uninitialized(ex); 3023 else 3024 ext4_ext_mark_initialized(ex); 3025 3026 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3027 ext4_ext_try_to_merge(handle, inode, path, ex); 3028 3029 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3030 goto out; 3031 } 3032 3033 /* case a */ 3034 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3035 ex->ee_len = cpu_to_le16(split - ee_block); 3036 if (split_flag & EXT4_EXT_MARK_UNINIT1) 3037 ext4_ext_mark_uninitialized(ex); 3038 3039 /* 3040 * path may lead to new leaf, not to original leaf any more 3041 * after ext4_ext_insert_extent() returns. 3042 */ 3043 err = ext4_ext_dirty(handle, inode, path + depth); 3044 if (err) 3045 goto fix_extent_len; 3046 3047 ex2 = &newex; 3048 ex2->ee_block = cpu_to_le32(split); 3049 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3050 ext4_ext_store_pblock(ex2, newblock); 3051 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3052 ext4_ext_mark_uninitialized(ex2); 3053 3054 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3055 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3056 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3057 if (split_flag & EXT4_EXT_DATA_VALID1) 3058 err = ext4_ext_zeroout(inode, ex2); 3059 else 3060 err = ext4_ext_zeroout(inode, ex); 3061 } else 3062 err = ext4_ext_zeroout(inode, &orig_ex); 3063 3064 if (err) 3065 goto fix_extent_len; 3066 /* update the extent length and mark as initialized */ 3067 ex->ee_len = cpu_to_le16(ee_len); 3068 ext4_ext_try_to_merge(handle, inode, path, ex); 3069 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3070 goto out; 3071 } else if (err) 3072 goto fix_extent_len; 3073 3074out: 3075 ext4_ext_show_leaf(inode, path); 3076 return err; 3077 3078fix_extent_len: 3079 ex->ee_len = orig_ex.ee_len; 3080 ext4_ext_dirty(handle, inode, path + depth); 3081 return err; 3082} 3083 3084/* 3085 * ext4_split_extent() splits an extent and marks the extent which is covered 3086 * by @map as split_flag indicates 3087 * 3088 * It may result in splitting the extent into multiple extents (up to three) 3089 * There are three possibilities: 3090 * a> There is no split required 3091 * b> Splits in two extents: Split is happening at either end of the extent 3092 * c> Splits in three extents: Someone is splitting in the middle of the extent 3093 * 3094 */ 3095static int ext4_split_extent(handle_t *handle, 3096 struct inode *inode, 3097 struct ext4_ext_path *path, 3098 struct ext4_map_blocks *map, 3099 int split_flag, 3100 int flags) 3101{ 3102 ext4_lblk_t ee_block; 3103 struct ext4_extent *ex; 3104 unsigned int ee_len, depth; 3105 int err = 0; 3106 int uninitialized; 3107 int split_flag1, flags1; 3108 3109 depth = ext_depth(inode); 3110 ex = path[depth].p_ext; 3111 ee_block = le32_to_cpu(ex->ee_block); 3112 ee_len = ext4_ext_get_actual_len(ex); 3113 uninitialized = ext4_ext_is_uninitialized(ex); 3114 3115 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3116 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 3117 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3118 if (uninitialized) 3119 split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 3120 EXT4_EXT_MARK_UNINIT2; 3121 if (split_flag & EXT4_EXT_DATA_VALID2) 3122 split_flag1 |= EXT4_EXT_DATA_VALID1; 3123 err = ext4_split_extent_at(handle, inode, path, 3124 map->m_lblk + map->m_len, split_flag1, flags1); 3125 if (err) 3126 goto out; 3127 } 3128 3129 ext4_ext_drop_refs(path); 3130 path = ext4_ext_find_extent(inode, map->m_lblk, path); 3131 if
(IS_ERR(path)) 3132 return PTR_ERR(path); 3133 3134 if (map->m_lblk >= ee_block) { 3135 split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | 3136 EXT4_EXT_DATA_VALID2); 3137 if (uninitialized) 3138 split_flag1 |= EXT4_EXT_MARK_UNINIT1; 3139 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3140 split_flag1 |= EXT4_EXT_MARK_UNINIT2; 3141 err = ext4_split_extent_at(handle, inode, path, 3142 map->m_lblk, split_flag1, flags); 3143 if (err) 3144 goto out; 3145 } 3146 3147 ext4_ext_show_leaf(inode, path); 3148out: 3149 return err ? err : map->m_len; 3150} 3151 3152/* 3153 * This function is called by ext4_ext_map_blocks() if someone tries to write 3154 * to an uninitialized extent. It may result in splitting the uninitialized 3155 * extent into multiple extents (up to three - one initialized and two 3156 * uninitialized). 3157 * There are three possibilities: 3158 * a> There is no split required: Entire extent should be initialized 3159 * b> Splits in two extents: Write is happening at either end of the extent 3160 * c> Splits in three extents: Someone is writing in the middle of the extent 3161 * 3162 * Pre-conditions: 3163 * - The extent pointed to by 'path' is uninitialized. 3164 * - The extent pointed to by 'path' contains a superset 3165 * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 3166 * 3167 * Post-conditions on success: 3168 * - the returned value is the number of blocks beyond map->m_lblk 3169 * that are allocated and initialized. 3170 * It is guaranteed to be >= map->m_len. 3171 */ 3172static int ext4_ext_convert_to_initialized(handle_t *handle, 3173 struct inode *inode, 3174 struct ext4_map_blocks *map, 3175 struct ext4_ext_path *path) 3176{ 3177 struct ext4_sb_info *sbi; 3178 struct ext4_extent_header *eh; 3179 struct ext4_map_blocks split_map; 3180 struct ext4_extent zero_ex; 3181 struct ext4_extent *ex; 3182 ext4_lblk_t ee_block, eof_block; 3183 unsigned int ee_len, depth; 3184 int allocated, max_zeroout = 0; 3185 int err = 0; 3186 int split_flag = 0; 3187 3188 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 3189 " block %llu, max_blocks %u\n", inode->i_ino, 3190 (unsigned long long)map->m_lblk, map->m_len); 3191 3192 sbi = EXT4_SB(inode->i_sb); 3193 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 3194 inode->i_sb->s_blocksize_bits; 3195 if (eof_block < map->m_lblk + map->m_len) 3196 eof_block = map->m_lblk + map->m_len; 3197 3198 depth = ext_depth(inode); 3199 eh = path[depth].p_hdr; 3200 ex = path[depth].p_ext; 3201 ee_block = le32_to_cpu(ex->ee_block); 3202 ee_len = ext4_ext_get_actual_len(ex); 3203 allocated = ee_len - (map->m_lblk - ee_block); 3204 3205 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3206 3207 /* Pre-conditions */ 3208 BUG_ON(!ext4_ext_is_uninitialized(ex)); 3209 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3210 3211 /* 3212 * Attempt to transfer newly initialized blocks from the currently 3213 * uninitialized extent to its left neighbor. This is much cheaper 3214 * than an insertion followed by a merge as those involve costly 3215 * memmove() calls. This is the common case in steady state for 3216 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append 3217 * writes. 3218 * 3219 * Limitations of the current logic: 3220 * - L1: we only deal with writes at the start of the extent. 3221 * The approach could be extended to writes at the end 3222 * of the extent but this scenario was deemed less common. 3223 * - L2: we do not deal with writes covering the whole extent.
3224 * This would require removing the extent if the transfer 3225 * is possible. 3226 * - L3: we only attempt to merge with an extent stored in the 3227 * same extent tree node. 3228 */ 3229 if ((map->m_lblk == ee_block) && /*L1*/ 3230 (map->m_len < ee_len) && /*L2*/ 3231 (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ 3232 struct ext4_extent *prev_ex; 3233 ext4_lblk_t prev_lblk; 3234 ext4_fsblk_t prev_pblk, ee_pblk; 3235 unsigned int prev_len, write_len; 3236 3237 prev_ex = ex - 1; 3238 prev_lblk = le32_to_cpu(prev_ex->ee_block); 3239 prev_len = ext4_ext_get_actual_len(prev_ex); 3240 prev_pblk = ext4_ext_pblock(prev_ex); 3241 ee_pblk = ext4_ext_pblock(ex); 3242 write_len = map->m_len; 3243 3244 /* 3245 * A transfer of blocks from 'ex' to 'prev_ex' is allowed 3246 * upon those conditions: 3247 * - C1: prev_ex is initialized, 3248 * - C2: prev_ex is logically abutting ex, 3249 * - C3: prev_ex is physically abutting ex, 3250 * - C4: prev_ex can receive the additional blocks without 3251 * overflowing the (initialized) length limit. 3252 */ 3253 if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ 3254 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3255 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3256 (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ 3257 err = ext4_ext_get_access(handle, inode, path + depth); 3258 if (err) 3259 goto out; 3260 3261 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3262 map, ex, prev_ex); 3263 3264 /* Shift the start of ex by 'write_len' blocks */ 3265 ex->ee_block = cpu_to_le32(ee_block + write_len); 3266 ext4_ext_store_pblock(ex, ee_pblk + write_len); 3267 ex->ee_len = cpu_to_le16(ee_len - write_len); 3268 ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3269 3270 /* Extend prev_ex by 'write_len' blocks */ 3271 prev_ex->ee_len = cpu_to_le16(prev_len + write_len); 3272 3273 /* Mark the block containing both extents as dirty */ 3274 ext4_ext_dirty(handle, inode, path + depth); 3275 3276 /* Update path to point to the right extent */ 3277 path[depth].p_ext = prev_ex; 3278 3279 /* Result: number of initialized blocks past m_lblk */ 3280 allocated = write_len; 3281 goto out; 3282 } 3283 } 3284 3285 WARN_ON(map->m_lblk < ee_block); 3286 /* 3287 * It is safe to convert extent to initialized via explicit 3288 * zeroout only if extent is fully inside i_size or new_size. 3289 */ 3290 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3291 3292 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3293 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3294 inode->i_sb->s_blocksize_bits; 3295 3296 /* If extent is less than s_extent_max_zeroout_kb, zeroout directly */ 3297 if (max_zeroout && (ee_len <= max_zeroout)) { 3298 err = ext4_ext_zeroout(inode, ex); 3299 if (err) 3300 goto out; 3301 3302 err = ext4_ext_get_access(handle, inode, path + depth); 3303 if (err) 3304 goto out; 3305 ext4_ext_mark_initialized(ex); 3306 ext4_ext_try_to_merge(handle, inode, path, ex); 3307 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3308 goto out; 3309 } 3310 3311 /* 3312 * four cases: 3313 * 1. split the extent into three extents. 3314 * 2. split the extent into two extents, zeroout the first half. 3315 * 3. split the extent into two extents, zeroout the second half. 3316 * 4. split the extent into two extents without zeroout.
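* (Editorial note: which case applies depends on max_zeroout and on where * [map->m_lblk, map->m_lblk + map->m_len) falls within the extent; e.g. a * write into the middle of the extent with zeroout disallowed takes * case 1, the three-way split.)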
3317 */ 3318 split_map.m_lblk = map->m_lblk; 3319 split_map.m_len = map->m_len; 3320 3321 if (max_zeroout && (allocated > map->m_len)) { 3322 if (allocated <= max_zeroout) { 3323 /* case 3 */ 3324 zero_ex.ee_block = 3325 cpu_to_le32(map->m_lblk); 3326 zero_ex.ee_len = cpu_to_le16(allocated); 3327 ext4_ext_store_pblock(&zero_ex, 3328 ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3329 err = ext4_ext_zeroout(inode, &zero_ex); 3330 if (err) 3331 goto out; 3332 split_map.m_lblk = map->m_lblk; 3333 split_map.m_len = allocated; 3334 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3335 /* case 2 */ 3336 if (map->m_lblk != ee_block) { 3337 zero_ex.ee_block = ex->ee_block; 3338 zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3339 ee_block); 3340 ext4_ext_store_pblock(&zero_ex, 3341 ext4_ext_pblock(ex)); 3342 err = ext4_ext_zeroout(inode, &zero_ex); 3343 if (err) 3344 goto out; 3345 } 3346 3347 split_map.m_lblk = ee_block; 3348 split_map.m_len = map->m_lblk - ee_block + map->m_len; 3349 allocated = map->m_len; 3350 } 3351 } 3352 3353 allocated = ext4_split_extent(handle, inode, path, 3354 &split_map, split_flag, 0); 3355 if (allocated < 0) 3356 err = allocated; 3357 3358out: 3359 return err ? err : allocated; 3360} 3361 3362/* 3363 * This function is called by ext4_ext_map_blocks() from 3364 * ext4_get_blocks_dio_write() when DIO to write 3365 * to an uninitialized extent. 3366 * 3367 * Writing to an uninitialized extent may result in splitting the uninitialized 3368 * extent into multiple initialized/uninitialized extents (up to three) 3369 * There are three possibilities: 3370 * a> There is no split required: Entire extent should be uninitialized 3371 * b> Splits in two extents: Write is happening at either end of the extent 3372 * c> Splits in three extents: Somone is writing in middle of the extent 3373 * 3374 * One of more index blocks maybe needed if the extent tree grow after 3375 * the uninitialized extent split. To prevent ENOSPC occur at the IO 3376 * complete, we need to split the uninitialized extent before DIO submit 3377 * the IO. The uninitialized extent called at this time will be split 3378 * into three uninitialized extent(at most). After IO complete, the part 3379 * being filled will be convert to initialized by the end_io callback function 3380 * via ext4_convert_unwritten_extents(). 3381 * 3382 * Returns the size of uninitialized extent to be written on success. 3383 */ 3384static int ext4_split_unwritten_extents(handle_t *handle, 3385 struct inode *inode, 3386 struct ext4_map_blocks *map, 3387 struct ext4_ext_path *path, 3388 int flags) 3389{ 3390 ext4_lblk_t eof_block; 3391 ext4_lblk_t ee_block; 3392 struct ext4_extent *ex; 3393 unsigned int ee_len; 3394 int split_flag = 0, depth; 3395 3396 ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 3397 "block %llu, max_blocks %u\n", inode->i_ino, 3398 (unsigned long long)map->m_lblk, map->m_len); 3399 3400 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 3401 inode->i_sb->s_blocksize_bits; 3402 if (eof_block < map->m_lblk + map->m_len) 3403 eof_block = map->m_lblk + map->m_len; 3404 /* 3405 * It is safe to convert extent to initialized via explicit 3406 * zeroout only if extent is fully insde i_size or new_size. 3407 */ 3408 depth = ext_depth(inode); 3409 ex = path[depth].p_ext; 3410 ee_block = le32_to_cpu(ex->ee_block); 3411 ee_len = ext4_ext_get_actual_len(ex); 3412 3413 split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 3414 split_flag |= EXT4_EXT_MARK_UNINIT2; 3415 if (flags & EXT4_GET_BLOCKS_CONVERT) 3416 split_flag |= EXT4_EXT_DATA_VALID2; 3417 flags |= EXT4_GET_BLOCKS_PRE_IO; 3418 return ext4_split_extent(handle, inode, path, map, split_flag, flags); 3419} 3420 3421static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3422 struct inode *inode, 3423 struct ext4_map_blocks *map, 3424 struct ext4_ext_path *path) 3425{ 3426 struct ext4_extent *ex; 3427 ext4_lblk_t ee_block; 3428 unsigned int ee_len; 3429 int depth; 3430 int err = 0; 3431 3432 depth = ext_depth(inode); 3433 ex = path[depth].p_ext; 3434 ee_block = le32_to_cpu(ex->ee_block); 3435 ee_len = ext4_ext_get_actual_len(ex); 3436 3437 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3438 " block %llu, max_blocks %u\n", inode->i_ino, 3439 (unsigned long long)ee_block, ee_len); 3440 3441 /* If extent is larger than requested then split is required */ 3442 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3443 err = ext4_split_unwritten_extents(handle, inode, map, path, 3444 EXT4_GET_BLOCKS_CONVERT); 3445 if (err < 0) 3446 goto out; 3447 ext4_ext_drop_refs(path); 3448 path = ext4_ext_find_extent(inode, map->m_lblk, path); 3449 if (IS_ERR(path)) { 3450 err = PTR_ERR(path); 3451 goto out; 3452 } 3453 depth = ext_depth(inode); 3454 ex = path[depth].p_ext; 3455 } 3456 3457 err = ext4_ext_get_access(handle, inode, path + depth); 3458 if (err) 3459 goto out; 3460 /* first mark the extent as initialized */ 3461 ext4_ext_mark_initialized(ex); 3462 3463 /* note: ext4_ext_correct_indexes() isn't needed here because 3464 * borders are not changed 3465 */ 3466 ext4_ext_try_to_merge(handle, inode, path, ex); 3467 3468 /* Mark modified extent as dirty */ 3469 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3470out: 3471 ext4_ext_show_leaf(inode, path); 3472 return err; 3473} 3474 3475static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3476 sector_t block, int count) 3477{ 3478 int i; 3479 for (i = 0; i < count; i++) 3480 unmap_underlying_metadata(bdev, block + i); 3481} 3482 3483/* 3484 * Handle EOFBLOCKS_FL flag, clearing it if necessary 3485 */ 3486static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3487 ext4_lblk_t lblk, 3488 struct ext4_ext_path *path, 3489 unsigned int len) 3490{ 3491 int i, depth; 3492 struct ext4_extent_header *eh; 3493 struct ext4_extent *last_ex; 3494 3495 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3496 return 0; 3497 3498 depth = ext_depth(inode); 3499 eh = path[depth].p_hdr; 3500 3501 /* 3502 * We're going to remove EOFBLOCKS_FL entirely in future so we 3503 * do not care for this case anymore. Simply remove the flag 3504 * if there are no extents. 3505 */ 3506 if (unlikely(!eh->eh_entries)) 3507 goto out; 3508 last_ex = EXT_LAST_EXTENT(eh); 3509 /* 3510 * We should clear the EOFBLOCKS_FL flag if we are writing the 3511 * last block in the last extent in the file. We test this by 3512 * first checking to see if the caller to 3513 * ext4_ext_get_blocks() was interested in the last block (or 3514 * a block beyond the last block) in the current extent. If 3515 * this turns out to be false, we can bail out from this 3516 * function immediately.
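* (Editorial example: if the file's last extent covers blocks 90..99, a * write to blocks 40..49 gives lblk + len == 50 < 100, so the check below * returns 0 without touching the flag.)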
3517 */ 3518 if (lblk + len < le32_to_cpu(last_ex->ee_block) + 3519 ext4_ext_get_actual_len(last_ex)) 3520 return 0; 3521 /* 3522 * If the caller does appear to be planning to write at or 3523 * beyond the end of the current extent, we then test to see 3524 * if the current extent is the last extent in the file, by 3525 * checking to make sure it was reached via the rightmost node 3526 * at each level of the tree. 3527 */ 3528 for (i = depth-1; i >= 0; i--) 3529 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 3530 return 0; 3531out: 3532 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3533 return ext4_mark_inode_dirty(handle, inode); 3534} 3535 3536/** 3537 * ext4_find_delalloc_range: find delayed allocated block in the given range. 3538 * 3539 * Return 1 if there is a delalloc block in the range, otherwise 0. 3540 */ 3541int ext4_find_delalloc_range(struct inode *inode, 3542 ext4_lblk_t lblk_start, 3543 ext4_lblk_t lblk_end) 3544{ 3545 struct extent_status es; 3546 3547 ext4_es_find_delayed_extent(inode, lblk_start, &es); 3548 if (es.es_len == 0) 3549 return 0; /* there is no delay extent in this tree */ 3550 else if (es.es_lblk <= lblk_start && 3551 lblk_start < es.es_lblk + es.es_len) 3552 return 1; 3553 else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) 3554 return 1; 3555 else 3556 return 0; 3557} 3558 3559int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) 3560{ 3561 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3562 ext4_lblk_t lblk_start, lblk_end; 3563 lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); 3564 lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 3565 3566 return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 3567} 3568 3569/** 3570 * Determines how many complete clusters (out of those specified by the 'map') 3571 * are under delalloc and were reserved quota for. 3572 * This function is called when we are writing out the blocks that were 3573 * originally written with their allocation delayed, but then the space was 3574 * allocated using fallocate() before the delayed allocation could be resolved. 3575 * The cases to look for are: 3576 * ('=' indicated delayed allocated blocks 3577 * '-' indicates non-delayed allocated blocks) 3578 * (a) partial clusters towards beginning and/or end outside of allocated range 3579 * are not delalloc'ed. 3580 * Ex: 3581 * |----c---=|====c====|====c====|===-c----| 3582 * |++++++ allocated ++++++| 3583 * ==> 4 complete clusters in above example 3584 * 3585 * (b) partial cluster (outside of allocated range) towards either end is 3586 * marked for delayed allocation. In this case, we will exclude that 3587 * cluster. 3588 * Ex: 3589 * |----====c========|========c========| 3590 * |++++++ allocated ++++++| 3591 * ==> 1 complete clusters in above example 3592 * 3593 * Ex: 3594 * |================c================| 3595 * |++++++ allocated ++++++| 3596 * ==> 0 complete clusters in above example 3597 * 3598 * The ext4_da_update_reserve_space will be called only if we 3599 * determine here that there were some "entire" clusters that span 3600 * this 'allocated' range. 3601 * In the non-bigalloc case, this function will just end up returning num_blks 3602 * without ever calling ext4_find_delalloc_range. 
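* (Editorial example, assuming s_cluster_ratio == 4: lblk_start == 6 and * num_blks == 9 span clusters 1..3, so allocated_clusters starts at 3; * the partial clusters at either end are then checked against * neighbouring delalloc blocks as described above.)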
3603 */ 3604static unsigned int 3605get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 3606 unsigned int num_blks) 3607{ 3608 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3609 ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 3610 ext4_lblk_t lblk_from, lblk_to, c_offset; 3611 unsigned int allocated_clusters = 0; 3612 3613 alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 3614 alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 3615 3616 /* max possible clusters for this allocation */ 3617 allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 3618 3619 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3620 3621 /* Check towards left side */ 3622 c_offset = lblk_start & (sbi->s_cluster_ratio - 1); 3623 if (c_offset) { 3624 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); 3625 lblk_to = lblk_from + c_offset - 1; 3626 3627 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 3628 allocated_clusters--; 3629 } 3630 3631 /* Now check towards right. */ 3632 c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); 3633 if (allocated_clusters && c_offset) { 3634 lblk_from = lblk_start + num_blks; 3635 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 3636 3637 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 3638 allocated_clusters--; 3639 } 3640 3641 return allocated_clusters; 3642} 3643 3644static int 3645ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3646 struct ext4_map_blocks *map, 3647 struct ext4_ext_path *path, int flags, 3648 unsigned int allocated, ext4_fsblk_t newblock) 3649{ 3650 int ret = 0; 3651 int err = 0; 3652 ext4_io_end_t *io = ext4_inode_aio(inode); 3653 3654 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " 3655 "block %llu, max_blocks %u, flags %x, allocated %u\n", 3656 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 3657 flags, allocated); 3658 ext4_ext_show_leaf(inode, path); 3659 3660 trace_ext4_ext_handle_uninitialized_extents(inode, map, flags, 3661 allocated, newblock); 3662 3663 /* get_block() before submitting the IO, split the extent */ 3664 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3665 ret = ext4_split_unwritten_extents(handle, inode, map, 3666 path, flags); 3667 if (ret <= 0) 3668 goto out; 3669 /* 3670 * Flag the inode (non-aio case) or end_io struct (aio case) 3671 * that this IO needs conversion to written when the IO is 3672 * completed 3673 */ 3674 if (io) 3675 ext4_set_io_unwritten_flag(inode, io); 3676 else 3677 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3678 map->m_flags |= EXT4_MAP_UNWRITTEN; 3679 if (ext4_should_dioread_nolock(inode)) 3680 map->m_flags |= EXT4_MAP_UNINIT; 3681 goto out; 3682 } 3683 /* IO end_io complete, convert the filled extent to written */ 3684 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3685 ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 3686 path); 3687 if (ret >= 0) { 3688 ext4_update_inode_fsync_trans(handle, inode, 1); 3689 err = check_eofblocks_fl(handle, inode, map->m_lblk, 3690 path, map->m_len); 3691 } else 3692 err = ret; 3693 goto out2; 3694 } 3695 /* buffered IO case */ 3696 /* 3697 * repeat fallocate creation request 3698 * we already have an unwritten extent 3699 */ 3700 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 3701 map->m_flags |= EXT4_MAP_UNWRITTEN; 3702 goto map_out; 3703 } 3704 3705 /* buffered READ or buffered write_begin() lookup */ 3706 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3707 /* 3708 * We have blocks reserved already.
We 3709 * return allocated blocks so that delalloc 3710 * won't do block reservation for us. But 3711 * the buffer head will be unmapped so that 3712 * a read from the block returns 0s. 3713 */ 3714 map->m_flags |= EXT4_MAP_UNWRITTEN; 3715 goto out1; 3716 } 3717 3718 /* buffered write, writepage time, convert*/ 3719 ret = ext4_ext_convert_to_initialized(handle, inode, map, path); 3720 if (ret >= 0) 3721 ext4_update_inode_fsync_trans(handle, inode, 1); 3722out: 3723 if (ret <= 0) { 3724 err = ret; 3725 goto out2; 3726 } else 3727 allocated = ret; 3728 map->m_flags |= EXT4_MAP_NEW; 3729 /* 3730 * if we allocated more blocks than requested 3731 * we need to make sure we unmap the extra block 3732 * allocated. The actual needed block will get 3733 * unmapped later when we find the buffer_head marked 3734 * new. 3735 */ 3736 if (allocated > map->m_len) { 3737 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3738 newblock + map->m_len, 3739 allocated - map->m_len); 3740 allocated = map->m_len; 3741 } 3742 3743 /* 3744 * If we have done fallocate with the offset that is already 3745 * delayed allocated, we would have block reservation 3746 * and quota reservation done in the delayed write path. 3747 * But fallocate would have already updated quota and block 3748 * count for this offset. So cancel these reservation 3749 */ 3750 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 3751 unsigned int reserved_clusters; 3752 reserved_clusters = get_reserved_cluster_alloc(inode, 3753 map->m_lblk, map->m_len); 3754 if (reserved_clusters) 3755 ext4_da_update_reserve_space(inode, 3756 reserved_clusters, 3757 0); 3758 } 3759 3760map_out: 3761 map->m_flags |= EXT4_MAP_MAPPED; 3762 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 3763 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 3764 map->m_len); 3765 if (err < 0) 3766 goto out2; 3767 } 3768out1: 3769 if (allocated > map->m_len) 3770 allocated = map->m_len; 3771 ext4_ext_show_leaf(inode, path); 3772 map->m_pblk = newblock; 3773 map->m_len = allocated; 3774out2: 3775 if (path) { 3776 ext4_ext_drop_refs(path); 3777 kfree(path); 3778 } 3779 return err ? err : allocated; 3780} 3781 3782/* 3783 * get_implied_cluster_alloc - check to see if the requested 3784 * allocation (in the map structure) overlaps with a cluster already 3785 * allocated in an extent. 3786 * @sb The filesystem superblock structure 3787 * @map The requested lblk->pblk mapping 3788 * @ex The extent structure which might contain an implied 3789 * cluster allocation 3790 * 3791 * This function is called by ext4_ext_map_blocks() after we failed to 3792 * find blocks that were already in the inode's extent tree. Hence, 3793 * we know that the beginning of the requested region cannot overlap 3794 * the extent from the inode's extent tree. There are three cases we 3795 * want to catch. 
The first is this case: 3796 * 3797 * |--- cluster # N--| 3798 * |--- extent ---| |---- requested region ---| 3799 * |==========| 3800 * 3801 * The second case that we need to test for is this one: 3802 * 3803 * |--------- cluster # N ----------------| 3804 * |--- requested region --| |------- extent ----| 3805 * |=======================| 3806 * 3807 * The third case is when the requested region lies between two extents 3808 * within the same cluster: 3809 * |------------- cluster # N-------------| 3810 * |----- ex -----| |---- ex_right ----| 3811 * |------ requested region ------| 3812 * |================| 3813 * 3814 * In each of the above cases, we need to set map->m_pblk and 3815 * map->m_len so they correspond to the extent labelled as 3816 * "|====|" from cluster #N, since it is already in use for data in 3817 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 3818 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 3819 * as a new "allocated" block region. Otherwise, we will return 0 and 3820 * ext4_ext_map_blocks() will then allocate one or more new clusters 3821 * by calling ext4_mb_new_blocks(). 3822 */ 3823static int get_implied_cluster_alloc(struct super_block *sb, 3824 struct ext4_map_blocks *map, 3825 struct ext4_extent *ex, 3826 struct ext4_ext_path *path) 3827{ 3828 struct ext4_sb_info *sbi = EXT4_SB(sb); 3829 ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 3830 ext4_lblk_t ex_cluster_start, ex_cluster_end; 3831 ext4_lblk_t rr_cluster_start; 3832 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3833 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3834 unsigned short ee_len = ext4_ext_get_actual_len(ex); 3835 3836 /* The extent passed in that we are trying to match */ 3837 ex_cluster_start = EXT4_B2C(sbi, ee_block); 3838 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 3839 3840 /* The requested region passed into ext4_map_blocks() */ 3841 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 3842 3843 if ((rr_cluster_start == ex_cluster_end) || 3844 (rr_cluster_start == ex_cluster_start)) { 3845 if (rr_cluster_start == ex_cluster_end) 3846 ee_start += ee_len - 1; 3847 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + 3848 c_offset; 3849 map->m_len = min(map->m_len, 3850 (unsigned) sbi->s_cluster_ratio - c_offset); 3851 /* 3852 * Check for and handle this case: 3853 * 3854 * |--------- cluster # N-------------| 3855 * |------- extent ----| 3856 * |--- requested region ---| 3857 * |===========| 3858 */ 3859 3860 if (map->m_lblk < ee_block) 3861 map->m_len = min(map->m_len, ee_block - map->m_lblk); 3862 3863 /* 3864 * Check for the case where there is already another allocated 3865 * block to the right of 'ex' but before the end of the cluster. 3866 * 3867 * |------------- cluster # N-------------| 3868 * |----- ex -----| |---- ex_right ----| 3869 * |------ requested region ------| 3870 * |================| 3871 */ 3872 if (map->m_lblk > ee_block) { 3873 ext4_lblk_t next = ext4_ext_next_allocated_block(path); 3874 map->m_len = min(map->m_len, next - map->m_lblk); 3875 } 3876 3877 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 3878 return 1; 3879 } 3880 3881 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 3882 return 0; 3883} 3884 3885 3886/* 3887 * Block allocation/map/preallocation routine for extents based files 3888 * 3889 * 3890 * Need to be called with 3891 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 3892 * (ie, create is zero).
Otherwise down_write(&EXT4_I(inode)->i_data_sem) 3893 * 3894 * return > 0, number of blocks already mapped/allocated 3895 * if create == 0 and these are pre-allocated blocks 3896 * buffer head is unmapped 3897 * otherwise blocks are mapped 3898 * 3899 * return = 0, if plain lookup failed (blocks have not been allocated) 3900 * buffer head is unmapped 3901 * 3902 * return < 0, error case. 3903 */ 3904int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3905 struct ext4_map_blocks *map, int flags) 3906{ 3907 struct ext4_ext_path *path = NULL; 3908 struct ext4_extent newex, *ex, *ex2; 3909 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3910 ext4_fsblk_t newblock = 0; 3911 int free_on_err = 0, err = 0, depth; 3912 unsigned int allocated = 0, offset = 0; 3913 unsigned int allocated_clusters = 0; 3914 struct ext4_allocation_request ar; 3915 ext4_io_end_t *io = ext4_inode_aio(inode); 3916 ext4_lblk_t cluster_offset; 3917 int set_unwritten = 0; 3918 3919 ext_debug("blocks %u/%u requested for inode %lu\n", 3920 map->m_lblk, map->m_len, inode->i_ino); 3921 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3922 3923 /* check in cache */ 3924 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3925 if (!newex.ee_start_lo && !newex.ee_start_hi) { 3926 if ((sbi->s_cluster_ratio > 1) && 3927 ext4_find_delalloc_cluster(inode, map->m_lblk)) 3928 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 3929 3930 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3931 /* 3932 * block isn't allocated yet and 3933 * user doesn't want to allocate it 3934 */ 3935 goto out2; 3936 } 3937 /* we should allocate requested block */ 3938 } else { 3939 /* block is already allocated */ 3940 if (sbi->s_cluster_ratio > 1) 3941 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 3942 newblock = map->m_lblk 3943 - le32_to_cpu(newex.ee_block) 3944 + ext4_ext_pblock(&newex); 3945 /* number of remaining blocks in the extent */ 3946 allocated = ext4_ext_get_actual_len(&newex) - 3947 (map->m_lblk - le32_to_cpu(newex.ee_block)); 3948 goto out; 3949 } 3950 } 3951 3952 /* find extent for this block */ 3953 path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 3954 if (IS_ERR(path)) { 3955 err = PTR_ERR(path); 3956 path = NULL; 3957 goto out2; 3958 } 3959 3960 depth = ext_depth(inode); 3961 3962 /* 3963 * consistent leaf must not be empty; 3964 * this situation is possible, though, _during_ tree modification; 3965 * this is why assert can't be put in ext4_ext_find_extent() 3966 */ 3967 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3968 EXT4_ERROR_INODE(inode, "bad extent address " 3969 "lblock: %lu, depth: %d pblock %lld", 3970 (unsigned long) map->m_lblk, depth, 3971 path[depth].p_block); 3972 err = -EIO; 3973 goto out2; 3974 } 3975 3976 ex = path[depth].p_ext; 3977 if (ex) { 3978 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3979 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3980 unsigned short ee_len; 3981 3982 /* 3983 * Uninitialized extents are treated as holes, except that 3984 * we split out initialized portions during a write.
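 *
 * (Illustrative aside, assuming a file preallocated with fallocate():
 * if blocks 0-99 form one uninitialized extent, a read of block 50
 * sees zeroes, while a write to block 50 splits the extent around the
 * written range and marks only that piece initialized.)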
3985 */ 3986 ee_len = ext4_ext_get_actual_len(ex); 3987 3988 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 3989 3990 /* if found extent covers block, simply return it */ 3991 if (in_range(map->m_lblk, ee_block, ee_len)) { 3992 newblock = map->m_lblk - ee_block + ee_start; 3993 /* number of remaining blocks in the extent */ 3994 allocated = ee_len - (map->m_lblk - ee_block); 3995 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3996 ee_block, ee_len, newblock); 3997 3998 /* 3999 * Do not put an uninitialized extent 4000 * in the cache 4001 */ 4002 if (!ext4_ext_is_uninitialized(ex)) { 4003 ext4_ext_put_in_cache(inode, ee_block, 4004 ee_len, ee_start); 4005 goto out; 4006 } 4007 allocated = ext4_ext_handle_uninitialized_extents( 4008 handle, inode, map, path, flags, 4009 allocated, newblock); 4010 goto out3; 4011 } 4012 } 4013 4014 if ((sbi->s_cluster_ratio > 1) && 4015 ext4_find_delalloc_cluster(inode, map->m_lblk)) 4016 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4017 4018 /* 4019 * requested block isn't allocated yet; 4020 * we cannot create it if the create flag is zero 4021 */ 4022 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4023 /* 4024 * put the just-found gap into the cache to speed up 4025 * subsequent requests 4026 */ 4027 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4028 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4029 goto out2; 4030 } 4031 4032 /* 4033 * Okay, we need to do block allocation. 4034 */ 4035 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 4036 newex.ee_block = cpu_to_le32(map->m_lblk); 4037 cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 4038 4039 /* 4040 * If we are doing bigalloc, check to see if the extent returned 4041 * by ext4_ext_find_extent() implies a cluster we can use. 4042 */ 4043 if (cluster_offset && ex && 4044 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4045 ar.len = allocated = map->m_len; 4046 newblock = map->m_pblk; 4047 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4048 goto got_allocated_blocks; 4049 } 4050 4051 /* find neighbouring allocated blocks */ 4052 ar.lleft = map->m_lblk; 4053 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4054 if (err) 4055 goto out2; 4056 ar.lright = map->m_lblk; 4057 ex2 = NULL; 4058 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4059 if (err) 4060 goto out2; 4061 4062 /* Check if the extent after searching to the right implies a 4063 * cluster we can use. */ 4064 if ((sbi->s_cluster_ratio > 1) && ex2 && 4065 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 4066 ar.len = allocated = map->m_len; 4067 newblock = map->m_pblk; 4068 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4069 goto got_allocated_blocks; 4070 } 4071 4072 /* 4073 * See if the request is beyond the maximum number of blocks we can 4074 * have in a single extent. For an initialized extent this limit is 4075 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 4076 * EXT_UNINIT_MAX_LEN.
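 *
 * (Illustrative aside: ee_len is a 16-bit field whose top bit marks
 * an extent uninitialized, so these limits work out to 32768 and
 * 32767 blocks respectively -- 128MiB and one block short of that at
 * a 4KiB block size.)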
4077 */ 4078 if (map->m_len > EXT_INIT_MAX_LEN && 4079 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4080 map->m_len = EXT_INIT_MAX_LEN; 4081 else if (map->m_len > EXT_UNINIT_MAX_LEN && 4082 (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4083 map->m_len = EXT_UNINIT_MAX_LEN; 4084 4085 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4086 newex.ee_len = cpu_to_le16(map->m_len); 4087 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4088 if (err) 4089 allocated = ext4_ext_get_actual_len(&newex); 4090 else 4091 allocated = map->m_len; 4092 4093 /* allocate new block */ 4094 ar.inode = inode; 4095 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4096 ar.logical = map->m_lblk; 4097 /* 4098 * We calculate the offset from the beginning of the cluster 4099 * for the logical block number, since when we allocate a 4100 * physical cluster, the physical block should start at the 4101 * same offset from the beginning of the cluster. This is 4102 * needed so that future calls to get_implied_cluster_alloc() 4103 * work correctly. 4104 */ 4105 offset = map->m_lblk & (sbi->s_cluster_ratio - 1); 4106 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4107 ar.goal -= offset; 4108 ar.logical -= offset; 4109 if (S_ISREG(inode->i_mode)) 4110 ar.flags = EXT4_MB_HINT_DATA; 4111 else 4112 /* disable in-core preallocation for non-regular files */ 4113 ar.flags = 0; 4114 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4115 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4116 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4117 if (!newblock) 4118 goto out2; 4119 ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4120 ar.goal, newblock, allocated); 4121 free_on_err = 1; 4122 allocated_clusters = ar.len; 4123 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4124 if (ar.len > allocated) 4125 ar.len = allocated; 4126 4127got_allocated_blocks: 4128 /* try to insert new extent into found leaf and return */ 4129 ext4_ext_store_pblock(&newex, newblock + offset); 4130 newex.ee_len = cpu_to_le16(ar.len); 4131 /* Mark uninitialized */ 4132 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 4133 ext4_ext_mark_uninitialized(&newex); 4134 map->m_flags |= EXT4_MAP_UNWRITTEN; 4135 /* 4136 * An io_end structure is created for every IO write to an 4137 * uninitialized extent. To avoid unnecessary conversion, 4138 * here we flag the IO that really needs the conversion. 4139 * For the non-async direct IO case, flag the inode state 4140 * so that we perform the conversion when IO is done. 4141 */ 4142 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) 4143 set_unwritten = 1; 4144 if (ext4_should_dioread_nolock(inode)) 4145 map->m_flags |= EXT4_MAP_UNINIT; 4146 } 4147 4148 err = 0; 4149 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4150 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4151 path, ar.len); 4152 if (!err) 4153 err = ext4_ext_insert_extent(handle, inode, path, 4154 &newex, flags); 4155 4156 if (!err && set_unwritten) { 4157 if (io) 4158 ext4_set_io_unwritten_flag(inode, io); 4159 else 4160 ext4_set_inode_state(inode, 4161 EXT4_STATE_DIO_UNWRITTEN); 4162 } 4163 4164 if (err && free_on_err) { 4165 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4166 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4167 /* free data blocks we just allocated */ 4168 /* not a good idea to call discard here directly, 4169 * but otherwise we'd need to call it on every free() */ 4170 ext4_discard_preallocations(inode); 4171 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), 4172 ext4_ext_get_actual_len(&newex), fb_flags); 4173 goto out2; 4174 } 4175 4176 /* previous routine could use block we allocated */ 4177 newblock = ext4_ext_pblock(&newex); 4178 allocated = ext4_ext_get_actual_len(&newex); 4179 if (allocated > map->m_len) 4180 allocated = map->m_len; 4181 map->m_flags |= EXT4_MAP_NEW; 4182 4183 /* 4184 * Update reserved blocks/metadata blocks after successful 4185 * block allocation which had been deferred till now. 4186 */ 4187 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 4188 unsigned int reserved_clusters; 4189 /* 4190 * Check how many clusters we had reserved for this allocated range 4191 */ 4192 reserved_clusters = get_reserved_cluster_alloc(inode, 4193 map->m_lblk, allocated); 4194 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 4195 if (reserved_clusters) { 4196 /* 4197 * We have clusters reserved for this range. 4198 * But since we are not doing actual allocation 4199 * and are simply using blocks from a previously 4200 * allocated cluster, we should release the 4201 * reservation and not claim quota. 4202 */ 4203 ext4_da_update_reserve_space(inode, 4204 reserved_clusters, 0); 4205 } 4206 } else { 4207 BUG_ON(allocated_clusters < reserved_clusters); 4208 /* We will claim quota for all newly allocated blocks. */ 4209 ext4_da_update_reserve_space(inode, allocated_clusters, 4210 1); 4211 if (reserved_clusters < allocated_clusters) { 4212 struct ext4_inode_info *ei = EXT4_I(inode); 4213 int reservation = allocated_clusters - 4214 reserved_clusters; 4215 /* 4216 * It seems we claimed a few clusters outside of 4217 * the range of this allocation. We should give 4218 * them back to the reservation pool. This can 4219 * happen in the following case: 4220 * 4221 * * Suppose s_cluster_ratio is 4 (i.e., each 4222 * cluster has 4 blocks). Thus, the clusters 4223 * are [0-3],[4-7],[8-11]... 4224 * * First comes a delayed allocation write for 4225 * logical blocks 10 & 11. Since there were no 4226 * previous delayed allocated blocks in the 4227 * range [8-11], we would reserve 1 cluster 4228 * for this write. 4229 * * Next comes a write for logical blocks 3 to 8. 4230 * In this case, we will reserve 2 clusters 4231 * (for [0-3] and [4-7], and not for [8-11], as 4232 * that range already has delayed allocated blocks). 4233 * Thus total reserved clusters now becomes 3. 4234 * * Now, during the delayed allocation writeout 4235 * time, we will first write blocks [3-8] and 4236 * allocate 3 clusters for writing these 4237 * blocks. Also, we would claim all three 4238 * of these clusters. 4239 * * Now when we come here to write out the 4240 * blocks [10-11], we would expect to claim 4241 * the reservation of 1 cluster we had made 4242 * (and we would claim it since there are no 4243 * more delayed allocated blocks in the range 4244 * [8-11]). But our reserved cluster count had 4245 * already gone to 0. 4246 * 4247 * Thus, at step 4 above, when we determine 4248 * that there are still some unwritten delayed 4249 * allocated blocks outside of our current 4250 * block range, we should increment the 4251 * reserved clusters count so that when the 4252 * remaining blocks finally get written, we 4253 * could claim them.
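 *
 * (Recap of the example above: writeout of [3-8] finds
 * allocated_clusters = 3 but only 2 reservations inside its own
 * range, so the third claim consumed the reservation belonging to
 * [10-11]; re-reserving the difference here keeps the later
 * writeout of [10-11] from going short.)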
4254 */ 4255 dquot_reserve_block(inode, 4256 EXT4_C2B(sbi, reservation)); 4257 spin_lock(&ei->i_block_reservation_lock); 4258 ei->i_reserved_data_blocks += reservation; 4259 spin_unlock(&ei->i_block_reservation_lock); 4260 } 4261 } 4262 } 4263 4264 /* 4265 * Cache the extent and update transaction to commit on fdatasync only 4266 * when it is _not_ an uninitialized extent. 4267 */ 4268 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { 4269 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); 4270 ext4_update_inode_fsync_trans(handle, inode, 1); 4271 } else 4272 ext4_update_inode_fsync_trans(handle, inode, 0); 4273out: 4274 if (allocated > map->m_len) 4275 allocated = map->m_len; 4276 ext4_ext_show_leaf(inode, path); 4277 map->m_flags |= EXT4_MAP_MAPPED; 4278 map->m_pblk = newblock; 4279 map->m_len = allocated; 4280out2: 4281 if (path) { 4282 ext4_ext_drop_refs(path); 4283 kfree(path); 4284 } 4285 4286out3: 4287 trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated); 4288 4289 return err ? err : allocated; 4290} 4291 4292void ext4_ext_truncate(struct inode *inode) 4293{ 4294 struct address_space *mapping = inode->i_mapping; 4295 struct super_block *sb = inode->i_sb; 4296 ext4_lblk_t last_block; 4297 handle_t *handle; 4298 loff_t page_len; 4299 int err = 0; 4300 4301 /* 4302 * finish any pending end_io work so we won't run the risk of 4303 * converting any truncated blocks to initialized later 4304 */ 4305 ext4_flush_unwritten_io(inode); 4306 4307 /* 4308 * probably first extent we're gonna free will be last in block 4309 */ 4310 err = ext4_writepage_trans_blocks(inode); 4311 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err); 4312 if (IS_ERR(handle)) 4313 return; 4314 4315 if (inode->i_size % PAGE_CACHE_SIZE != 0) { 4316 page_len = PAGE_CACHE_SIZE - 4317 (inode->i_size & (PAGE_CACHE_SIZE - 1)); 4318 4319 err = ext4_discard_partial_page_buffers(handle, 4320 mapping, inode->i_size, page_len, 0); 4321 4322 if (err) 4323 goto out_stop; 4324 } 4325 4326 if (ext4_orphan_add(handle, inode)) 4327 goto out_stop; 4328 4329 down_write(&EXT4_I(inode)->i_data_sem); 4330 ext4_ext_invalidate_cache(inode); 4331 4332 ext4_discard_preallocations(inode); 4333 4334 /* 4335 * TODO: optimization is possible here. 4336 * Probably we need not scan at all, 4337 * because page truncation is enough. 4338 */ 4339 4340 /* we have to know where to truncate from in crash case */ 4341 EXT4_I(inode)->i_disksize = inode->i_size; 4342 ext4_mark_inode_dirty(handle, inode); 4343 4344 last_block = (inode->i_size + sb->s_blocksize - 1) 4345 >> EXT4_BLOCK_SIZE_BITS(sb); 4346 err = ext4_es_remove_extent(inode, last_block, 4347 EXT_MAX_BLOCKS - last_block); 4348 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4349 4350 /* In a multi-transaction truncate, we only make the final 4351 * transaction synchronous. 4352 */ 4353 if (IS_SYNC(inode)) 4354 ext4_handle_sync(handle); 4355 4356 up_write(&EXT4_I(inode)->i_data_sem); 4357 4358out_stop: 4359 /* 4360 * If this was a simple ftruncate() and the file will remain alive, 4361 * then we need to clear up the orphan record which we created above. 4362 * However, if this was a real unlink then we were called by 4363 * ext4_delete_inode(), and we allow that function to clean up the 4364 * orphan info for us. 
4365 */ 4366 if (inode->i_nlink) 4367 ext4_orphan_del(handle, inode); 4368 4369 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4370 ext4_mark_inode_dirty(handle, inode); 4371 ext4_journal_stop(handle); 4372} 4373 4374static void ext4_falloc_update_inode(struct inode *inode, 4375 int mode, loff_t new_size, int update_ctime) 4376{ 4377 struct timespec now; 4378 4379 if (update_ctime) { 4380 now = current_fs_time(inode->i_sb); 4381 if (!timespec_equal(&inode->i_ctime, &now)) 4382 inode->i_ctime = now; 4383 } 4384 /* 4385 * Update only when preallocation was requested beyond 4386 * the file size. 4387 */ 4388 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 4389 if (new_size > i_size_read(inode)) 4390 i_size_write(inode, new_size); 4391 if (new_size > EXT4_I(inode)->i_disksize) 4392 ext4_update_i_disksize(inode, new_size); 4393 } else { 4394 /* 4395 * Mark that we allocate beyond EOF so the subsequent truncate 4396 * can proceed even if the new size is the same as i_size. 4397 */ 4398 if (new_size > i_size_read(inode)) 4399 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4400 } 4401 4402} 4403 4404/* 4405 * Preallocate space for a file. This implements ext4's fallocate file 4406 * operation, which gets called from the sys_fallocate system call. 4407 * For block-mapped files, posix_fallocate should fall back to the method 4408 * of writing zeroes to the required new blocks (the same behavior that is 4409 * expected for file systems that do not support the fallocate() system call). 4410 */ 4411long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4412{ 4413 struct inode *inode = file->f_path.dentry->d_inode; 4414 handle_t *handle; 4415 loff_t new_size; 4416 unsigned int max_blocks; 4417 int ret = 0; 4418 int ret2 = 0; 4419 int retries = 0; 4420 int flags; 4421 struct ext4_map_blocks map; 4422 unsigned int credits, blkbits = inode->i_blkbits; 4423 4424 /* Return error if mode is not supported */ 4425 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 4426 return -EOPNOTSUPP; 4427 4428 if (mode & FALLOC_FL_PUNCH_HOLE) 4429 return ext4_punch_hole(file, offset, len); 4430 4431 ret = ext4_convert_inline_data(inode); 4432 if (ret) 4433 return ret; 4434 4435 /* 4436 * currently supporting (pre)allocate mode for extent-based 4437 * files _only_ 4438 */ 4439 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4440 return -EOPNOTSUPP; 4441 4442 trace_ext4_fallocate_enter(inode, offset, len, mode); 4443 map.m_lblk = offset >> blkbits; 4444 /* 4445 * We can't just convert len to max_blocks because, if e.g. blocksize = 4096, 4446 * offset = 3072 and len = 2048, the request spans two blocks. 4447 */ 4448 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 4449 - map.m_lblk; 4450 /* 4451 * credits to insert 1 extent into extent tree 4452 */ 4453 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4454 mutex_lock(&inode->i_mutex); 4455 ret = inode_newsize_ok(inode, (len + offset)); 4456 if (ret) { 4457 mutex_unlock(&inode->i_mutex); 4458 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4459 return ret; 4460 } 4461 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; 4462 if (mode & FALLOC_FL_KEEP_SIZE) 4463 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4464 /* 4465 * Don't normalize the request if it can fit in one extent so 4466 * that it doesn't get unnecessarily split into multiple 4467 * extents.
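 *
 * (Illustrative aside: the check below compares len against
 * EXT_UNINIT_MAX_LEN blocks, one block short of 128MiB at a 4KiB
 * block size, so most fallocate() requests fit in a single extent
 * and skip mballoc normalization.)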
4468 */ 4469 if (len <= EXT_UNINIT_MAX_LEN << blkbits) 4470 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4471 4472 /* Prevent races with concurrent conversion of unwritten extents */ 4473 ext4_flush_unwritten_io(inode); 4474retry: 4475 while (ret >= 0 && ret < max_blocks) { 4476 map.m_lblk = map.m_lblk + ret; 4477 map.m_len = max_blocks = max_blocks - ret; 4478 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4479 credits); 4480 if (IS_ERR(handle)) { 4481 ret = PTR_ERR(handle); 4482 break; 4483 } 4484 ret = ext4_map_blocks(handle, inode, &map, flags); 4485 if (ret <= 0) { 4486#ifdef EXT4FS_DEBUG 4487 ext4_warning(inode->i_sb, 4488 "inode #%lu: block %u: len %u: " 4489 "ext4_ext_map_blocks returned %d", 4490 inode->i_ino, map.m_lblk, 4491 map.m_len, ret); 4492#endif 4493 ext4_mark_inode_dirty(handle, inode); 4494 ret2 = ext4_journal_stop(handle); 4495 break; 4496 } 4497 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 4498 blkbits) >> blkbits)) 4499 new_size = offset + len; 4500 else 4501 new_size = ((loff_t) map.m_lblk + ret) << blkbits; 4502 4503 ext4_falloc_update_inode(inode, mode, new_size, 4504 (map.m_flags & EXT4_MAP_NEW)); 4505 ext4_mark_inode_dirty(handle, inode); 4506 if ((file->f_flags & O_SYNC) && ret >= max_blocks) 4507 ext4_handle_sync(handle); 4508 ret2 = ext4_journal_stop(handle); 4509 if (ret2) 4510 break; 4511 } 4512 if (ret == -ENOSPC && 4513 ext4_should_retry_alloc(inode->i_sb, &retries)) { 4514 ret = 0; 4515 goto retry; 4516 } 4517 mutex_unlock(&inode->i_mutex); 4518 trace_ext4_fallocate_exit(inode, offset, max_blocks, 4519 ret > 0 ? ret2 : ret); 4520 return ret > 0 ? ret2 : ret; 4521} 4522 4523/* 4524 * This function converts a range of blocks to written extents. 4525 * The caller of this function will pass the start offset and the size; 4526 * all unwritten extents within this range will be converted to 4527 * written extents. 4528 * 4529 * This function is called from the direct IO end_io callback 4530 * function, to convert the fallocated extents after IO is completed. 4531 * Returns 0 on success. 4532 */ 4533int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 4534 ssize_t len) 4535{ 4536 handle_t *handle; 4537 unsigned int max_blocks; 4538 int ret = 0; 4539 int ret2 = 0; 4540 struct ext4_map_blocks map; 4541 unsigned int credits, blkbits = inode->i_blkbits; 4542 4543 map.m_lblk = offset >> blkbits; 4544 /* 4545 * We can't just convert len to max_blocks because, if e.g. blocksize = 4096, 4546 * offset = 3072 and len = 2048, the request spans two blocks. 4547 */ 4548 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 4549 map.m_lblk); 4550 /* 4551 * credits to insert 1 extent into extent tree 4552 */ 4553 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4554 while (ret >= 0 && ret < max_blocks) { 4555 map.m_lblk += ret; 4556 map.m_len = (max_blocks -= ret); 4557 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); 4558 if (IS_ERR(handle)) { 4559 ret = PTR_ERR(handle); 4560 break; 4561 } 4562 ret = ext4_map_blocks(handle, inode, &map, 4563 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4564 if (ret <= 0) 4565 ext4_warning(inode->i_sb, 4566 "inode #%lu: block %u: len %u: " 4567 "ext4_ext_map_blocks returned %d", 4568 inode->i_ino, map.m_lblk, 4569 map.m_len, ret); 4570 ext4_mark_inode_dirty(handle, inode); 4571 ret2 = ext4_journal_stop(handle); 4572 if (ret <= 0 || ret2) 4573 break; 4574 } 4575 return ret > 0 ?
ret2 : ret; 4576} 4577 4578/* 4579 * If newex is not an existing extent (newex->ec_start equals zero), 4580 * find the delayed extent at the start of newex, update newex 4581 * accordingly, and return the start of the next delayed extent. 4582 * 4583 * If newex is an existing extent (newex->ec_start is not zero), 4584 * return the start of the next delayed extent, or EXT_MAX_BLOCKS if 4585 * no delayed extent is found. Leave newex unmodified. 4586 */ 4587static int ext4_find_delayed_extent(struct inode *inode, 4588 struct ext4_ext_cache *newex) 4589{ 4590 struct extent_status es; 4591 ext4_lblk_t block, next_del; 4592 4593 ext4_es_find_delayed_extent(inode, newex->ec_block, &es); 4594 4595 if (newex->ec_start == 0) { 4596 /* 4597 * No extent in the extent tree contains block @newex->ec_block, 4598 * so the block may lie in 1) a hole or 2) a delayed extent. 4599 */ 4600 if (es.es_len == 0) 4601 /* A hole found. */ 4602 return 0; 4603 4604 if (es.es_lblk > newex->ec_block) { 4605 /* A hole found. */ 4606 newex->ec_len = min(es.es_lblk - newex->ec_block, 4607 newex->ec_len); 4608 return 0; 4609 } 4610 4611 newex->ec_len = es.es_lblk + es.es_len - newex->ec_block; 4612 } 4613 4614 block = newex->ec_block + newex->ec_len; 4615 ext4_es_find_delayed_extent(inode, block, &es); 4616 if (es.es_len == 0) 4617 next_del = EXT_MAX_BLOCKS; 4618 else 4619 next_del = es.es_lblk; 4620 4621 return next_del; 4622} 4623/* fiemap flags we can handle are specified here */ 4624#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 4625 4626static int ext4_xattr_fiemap(struct inode *inode, 4627 struct fiemap_extent_info *fieinfo) 4628{ 4629 __u64 physical = 0; 4630 __u64 length; 4631 __u32 flags = FIEMAP_EXTENT_LAST; 4632 int blockbits = inode->i_sb->s_blocksize_bits; 4633 int error = 0; 4634 4635 /* in-inode? */ 4636 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4637 struct ext4_iloc iloc; 4638 int offset; /* offset of xattr in inode */ 4639 4640 error = ext4_get_inode_loc(inode, &iloc); 4641 if (error) 4642 return error; 4643 physical = iloc.bh->b_blocknr << blockbits; 4644 offset = EXT4_GOOD_OLD_INODE_SIZE + 4645 EXT4_I(inode)->i_extra_isize; 4646 physical += offset; 4647 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4648 flags |= FIEMAP_EXTENT_DATA_INLINE; 4649 brelse(iloc.bh); 4650 } else { /* external block */ 4651 physical = EXT4_I(inode)->i_file_acl << blockbits; 4652 length = inode->i_sb->s_blocksize; 4653 } 4654 4655 if (physical) 4656 error = fiemap_fill_next_extent(fieinfo, 0, physical, 4657 length, flags); 4658 return (error < 0 ? error : 0); 4659} 4660 4661/* 4662 * ext4_ext_punch_hole 4663 * 4664 * Punches a hole of "length" bytes in a file starting 4665 * at byte "offset" 4666 * 4667 * @inode: The inode of the file to punch a hole in 4668 * @offset: The starting byte offset of the hole 4669 * @length: The length of the hole 4670 * 4671 * Returns 0 on success or a negative error code on failure 4672 */ 4673int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) 4674{ 4675 struct inode *inode = file->f_path.dentry->d_inode; 4676 struct super_block *sb = inode->i_sb; 4677 ext4_lblk_t first_block, stop_block; 4678 struct address_space *mapping = inode->i_mapping; 4679 handle_t *handle; 4680 loff_t first_page, last_page, page_len; 4681 loff_t first_page_offset, last_page_offset; 4682 int credits, err = 0; 4683 4684 /* 4685 * Write out all dirty pages to avoid race conditions, 4686 * then release them.
4687 */ 4688 if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 4689 err = filemap_write_and_wait_range(mapping, 4690 offset, offset + length - 1); 4691 4692 if (err) 4693 return err; 4694 } 4695 4696 mutex_lock(&inode->i_mutex); 4697 /* It's not possible to punch a hole in an append-only file */ 4698 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 4699 err = -EPERM; 4700 goto out_mutex; 4701 } 4702 if (IS_SWAPFILE(inode)) { 4703 err = -ETXTBSY; 4704 goto out_mutex; 4705 } 4706 4707 /* No need to punch hole beyond i_size */ 4708 if (offset >= inode->i_size) 4709 goto out_mutex; 4710 4711 /* 4712 * If the hole extends beyond i_size, set the hole 4713 * to end after the page that contains i_size 4714 */ 4715 if (offset + length > inode->i_size) { 4716 length = inode->i_size + 4717 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - 4718 offset; 4719 } 4720 4721 first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 4722 last_page = (offset + length) >> PAGE_CACHE_SHIFT; 4723 4724 first_page_offset = first_page << PAGE_CACHE_SHIFT; 4725 last_page_offset = last_page << PAGE_CACHE_SHIFT; 4726 4727 /* Now release the pages */ 4728 if (last_page_offset > first_page_offset) { 4729 truncate_pagecache_range(inode, first_page_offset, 4730 last_page_offset - 1); 4731 } 4732 4733 /* Wait for all existing dio workers; newcomers will block on i_mutex */ 4734 ext4_inode_block_unlocked_dio(inode); 4735 err = ext4_flush_unwritten_io(inode); 4736 if (err) 4737 goto out_dio; 4738 inode_dio_wait(inode); 4739 4740 credits = ext4_writepage_trans_blocks(inode); 4741 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4742 if (IS_ERR(handle)) { 4743 err = PTR_ERR(handle); 4744 goto out_dio; 4745 } 4746 4747 4748 /* 4749 * Now we need to zero out the non-page-aligned data in the 4750 * pages at the start and tail of the hole, and unmap the buffer 4751 * heads for the block-aligned regions of the page that were 4752 * completely zeroed.
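 *
 * (Illustrative aside, assuming 4KiB pages: punching offset = 1024,
 * length = 10240 gives first_page_offset = 4096 and
 * last_page_offset = 8192; the partial ranges 1024-4095 and
 * 8192-11263 are zeroed below, while the whole page in between was
 * already dropped from the page cache above.)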
4753 */ 4754 if (first_page > last_page) { 4755 /* 4756 * If the file space being truncated is contained within a page 4757 * just zero out and unmap the middle of that page 4758 */ 4759 err = ext4_discard_partial_page_buffers(handle, 4760 mapping, offset, length, 0); 4761 4762 if (err) 4763 goto out; 4764 } else { 4765 /* 4766 * zero out and unmap the partial page that contains 4767 * the start of the hole 4768 */ 4769 page_len = first_page_offset - offset; 4770 if (page_len > 0) { 4771 err = ext4_discard_partial_page_buffers(handle, mapping, 4772 offset, page_len, 0); 4773 if (err) 4774 goto out; 4775 } 4776 4777 /* 4778 * zero out and unmap the partial page that contains 4779 * the end of the hole 4780 */ 4781 page_len = offset + length - last_page_offset; 4782 if (page_len > 0) { 4783 err = ext4_discard_partial_page_buffers(handle, mapping, 4784 last_page_offset, page_len, 0); 4785 if (err) 4786 goto out; 4787 } 4788 } 4789 4790 /* 4791 * If i_size is contained in the last page, we need to 4792 * unmap and zero the partial page after i_size 4793 */ 4794 if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && 4795 inode->i_size % PAGE_CACHE_SIZE != 0) { 4796 4797 page_len = PAGE_CACHE_SIZE - 4798 (inode->i_size & (PAGE_CACHE_SIZE - 1)); 4799 4800 if (page_len > 0) { 4801 err = ext4_discard_partial_page_buffers(handle, 4802 mapping, inode->i_size, page_len, 0); 4803 4804 if (err) 4805 goto out; 4806 } 4807 } 4808 4809 first_block = (offset + sb->s_blocksize - 1) >> 4810 EXT4_BLOCK_SIZE_BITS(sb); 4811 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4812 4813 /* If there are no blocks to remove, return now */ 4814 if (first_block >= stop_block) 4815 goto out; 4816 4817 down_write(&EXT4_I(inode)->i_data_sem); 4818 ext4_ext_invalidate_cache(inode); 4819 ext4_discard_preallocations(inode); 4820 4821 err = ext4_es_remove_extent(inode, first_block, 4822 stop_block - first_block); 4823 err = ext4_ext_remove_space(inode, first_block, stop_block - 1); 4824 4825 ext4_ext_invalidate_cache(inode); 4826 ext4_discard_preallocations(inode); 4827 4828 if (IS_SYNC(inode)) 4829 ext4_handle_sync(handle); 4830 4831 up_write(&EXT4_I(inode)->i_data_sem); 4832 4833out: 4834 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4835 ext4_mark_inode_dirty(handle, inode); 4836 ext4_journal_stop(handle); 4837out_dio: 4838 ext4_inode_resume_unlocked_dio(inode); 4839out_mutex: 4840 mutex_unlock(&inode->i_mutex); 4841 return err; 4842} 4843 4844int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4845 __u64 start, __u64 len) 4846{ 4847 ext4_lblk_t start_blk; 4848 int error = 0; 4849 4850 if (ext4_has_inline_data(inode)) { 4851 int has_inline = 1; 4852 4853 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline); 4854 4855 if (has_inline) 4856 return error; 4857 } 4858 4859 /* fallback to generic here if not in extents fmt */ 4860 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4861 return generic_block_fiemap(inode, fieinfo, start, len, 4862 ext4_get_block); 4863 4864 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 4865 return -EBADR; 4866 4867 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 4868 error = ext4_xattr_fiemap(inode, fieinfo); 4869 } else { 4870 ext4_lblk_t len_blks; 4871 __u64 last_blk; 4872 4873 start_blk = start >> inode->i_sb->s_blocksize_bits; 4874 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4875 if (last_blk >= EXT_MAX_BLOCKS) 4876 last_blk = EXT_MAX_BLOCKS-1; 4877 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 4878 4879 /* 
4880 * Walk the extent tree gathering extent information 4881 * and pushing extents back to the user. 4882 */ 4883 error = ext4_fill_fiemap_extents(inode, start_blk, 4884 len_blks, fieinfo); 4885 } 4886 4887 return error; 4888} 4889
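/*
 * (Aside, not part of the original file: a minimal sketch of a
 * userspace caller for the fiemap path above, error handling elided;
 * "fd" is assumed to be an open file descriptor. FS_IOC_FIEMAP is
 * defined in <linux/fs.h>, the fiemap structures and FIEMAP_* flags
 * in <linux/fiemap.h>, calloc() in <stdlib.h>.
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *			32 * sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * FIEMAP_FLAG_SYNC is accepted via EXT4_FIEMAP_FLAGS above, and each
 * extent returned in fm->fm_extents[] is produced by
 * fiemap_fill_next_extent() during the walk in
 * ext4_fill_fiemap_extents().)
 */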