aops.c revision 293b2f70b4a16a1ca91efd28ef3d6634262c6887
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int err = -EIO;
        int status;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *buffer_cache_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        void *kaddr;

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        BUG_ON(ocfs2_inode_is_fast_symlink(inode));

        if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
                mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
                     (unsigned long long)iblock);
                goto bail;
        }

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

        if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                                    le32_to_cpu(fe->i_clusters))) {
                mlog(ML_ERROR, "block offset is outside the allocated size: "
                     "%llu\n", (unsigned long long)iblock);
                goto bail;
        }

        /* We don't use the page cache to create symlink data, so if
         * need be, copy it over from the buffer cache. */
        if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
                u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
                            iblock;
                buffer_cache_bh = sb_getblk(osb->sb, blkno);
                if (!buffer_cache_bh) {
                        mlog(ML_ERROR, "couldn't getblock for symlink!\n");
                        goto bail;
                }

                /* we haven't locked out transactions, so a commit
                 * could've happened. Since we've got a reference on
                 * the bh, even if it commits while we're doing the
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
                        kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                goto bail;
                        }
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
                        kunmap_atomic(kaddr, KM_USER0);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
        }

        map_bh(bh_result, inode->i_sb,
               le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

        err = 0;

bail:
        brelse(bh);

        mlog_exit(err);
        return err;
}
int ocfs2_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create)
{
        int err = 0;
        unsigned int ext_flags;
        u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
        u64 p_blkno, count, past_eof;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
                mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
                     inode, inode->i_ino);

        if (S_ISLNK(inode->i_mode)) {
                /* this always does I/O for some reason. */
                err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
                goto bail;
        }

        err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
                                          &ext_flags);
        if (err) {
                mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
                     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
                     (unsigned long long)p_blkno);
                goto bail;
        }

        if (max_blocks < count)
                count = max_blocks;

        /*
         * ocfs2 never allocates in this function - the only time we
         * need to use BH_New is when we're extending i_size on a file
         * system which doesn't support holes, in which case BH_New
         * allows block_prepare_write() to zero.
         *
         * If we see this on a sparse file system, then a truncate has
         * raced us and removed the cluster. In this case, we clear
         * the buffers dirty and uptodate bits and let the buffer code
         * ignore it as a hole.
         */
        if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
                clear_buffer_dirty(bh_result);
                clear_buffer_uptodate(bh_result);
                goto bail;
        }

        /* Treat the unwritten extent as a hole for zeroing purposes. */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);

        bh_result->b_size = count << inode->i_blkbits;

        if (!ocfs2_sparse_alloc(osb)) {
                if (p_blkno == 0) {
                        err = -EIO;
                        mlog(ML_ERROR,
                             "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
                             (unsigned long long)iblock,
                             (unsigned long long)p_blkno,
                             (unsigned long long)OCFS2_I(inode)->ip_blkno);
                        mlog(ML_ERROR, "Size %llu, clusters %u\n",
                             (unsigned long long)i_size_read(inode),
                             OCFS2_I(inode)->ip_clusters);
                        dump_stack();
                        goto bail;
                }

                past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
                mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
                     (unsigned long long)past_eof);

                if (create && (iblock >= past_eof))
                        set_buffer_new(bh_result);
        }

bail:
        if (err < 0)
                err = -EIO;

        mlog_exit(err);
        return err;
}
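/*
 * A worked example of the mapping contract above, assuming 4K blocks
 * (i_blkbits == 12; illustrative numbers only): a caller asking about
 * iblock 100 with bh_result->b_size == 32768 is asking about 8 blocks.
 * If the extent map reports only 5 contiguous blocks there, count stays
 * at 5 (already below max_blocks == 8), the buffer is mapped at p_blkno
 * and b_size is rewritten to 5 << 12 == 20480 to tell the caller how
 * much of the request this single mapping covers.
 */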
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
                           struct buffer_head *di_bh)
{
        void *kaddr;
        loff_t size;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
                ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno);
                return -EROFS;
        }

        size = i_size_read(inode);

        if (size > PAGE_CACHE_SIZE ||
            size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu with inline data has a bad size: %Lu",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)size);
                return -EROFS;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        if (size)
                memcpy(kaddr, di->id2.i_data.id_data, size);
        /* Clear the remaining part of the page */
        memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        SetPageUptodate(page);

        return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
        int ret;
        struct buffer_head *di_bh = NULL;

        BUG_ON(!PageLocked(page));
        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

        ret = ocfs2_read_inode_block(inode, &di_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
        unlock_page(page);

        brelse(di_bh);
        return ret;
}
static int ocfs2_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int ret, unlock = 1;

        mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

        ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out;
        }

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                ret = AOP_TRUNCATED_PAGE;
                goto out_inode_unlock;
        }

        /*
         * i_size might have just been updated as we grabbed the meta lock.  We
         * might now be discovering a truncate that hit on another node.
         * block_read_full_page->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here.  Callers
         * (generic_file_read, vm_ops->fault) are clever enough to check i_size
         * and notice that the page they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
        }

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                ret = ocfs2_readpage_inline(inode, page);
        else
                ret = block_read_full_page(page, ocfs2_get_block);
        unlock = 0;

out_alloc:
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
        ocfs2_inode_unlock(inode, 0);
out:
        if (unlock)
                unlock_page(page);
        mlog_exit(ret);
        return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        int ret, err = -EIO;
        struct inode *inode = mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start;
        struct page *last;

        /*
         * Use the nonblocking flag for the dlm code to avoid page
         * lock inversion, but don't bother with retrying.
         */
        ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
        if (ret)
                return err;

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                ocfs2_inode_unlock(inode, 0);
                return err;
        }

        /*
         * Don't bother with inline-data. There isn't anything
         * to read-ahead in that case anyway...
         */
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                goto out_unlock;

        /*
         * Check whether a remote node truncated this file - we just
         * drop out in that case as it's not worth handling here.
         */
        last = list_entry(pages->prev, struct page, lru);
        start = (loff_t)last->index << PAGE_CACHE_SHIFT;
        if (start >= i_size_read(inode))
                goto out_unlock;

        err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
        up_read(&oi->ip_alloc_sem);
        ocfs2_inode_unlock(inode, 0);

        return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        mlog_entry("(0x%p)\n", page);

        ret = block_write_full_page(page, ocfs2_get_block, wbc);

        mlog_exit(ret);

        return ret;
}

/*
 * This is called from ocfs2_write_zero_page() which has handled its
 * own cluster locking and has ensured allocation exists for those
 * blocks to be written.
 */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
                               unsigned from, unsigned to)
{
        int ret;

        ret = block_prepare_write(page, from, to, ocfs2_get_block);

        return ret;
}
/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(  handle_t *handle,
                        struct buffer_head *head,
                        unsigned from,
                        unsigned to,
                        int *partial,
                        int (*fn)(      handle_t *handle,
                                        struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                      struct page *page,
                                      unsigned from,
                                      unsigned to)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        int ret = 0;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        if (ocfs2_should_order_data(inode)) {
                ret = ocfs2_jbd2_file_inode(handle, inode);
                if (ret < 0)
                        mlog_errno(ret);
        }
out:
        if (ret) {
                if (!IS_ERR(handle))
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
        sector_t status;
        u64 p_blkno = 0;
        int err = 0;
        struct inode *inode = mapping->host;

        mlog_entry("(block = %llu)\n", (unsigned long long)block);

        /* We don't need to lock journal system files, since they aren't
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
                err = ocfs2_inode_lock(inode, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
                        goto bail;
                }
                down_read(&OCFS2_I(inode)->ip_alloc_sem);
        }

        if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
                                                  NULL);

        if (!INODE_JOURNAL(inode)) {
                up_read(&OCFS2_I(inode)->ip_alloc_sem);
                ocfs2_inode_unlock(inode, 0);
        }

        if (err) {
                mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
                     (unsigned long long)block);
                mlog_errno(err);
                goto bail;
        }

bail:
        status = err ? 0 : p_blkno;

        mlog_exit((int)status);

        return status;
}
/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *                                      fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
                                      struct buffer_head *bh_result, int create)
{
        int ret;
        u64 p_blkno, inode_blocks, contig_blocks;
        unsigned int ext_flags;
        unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

        /* This function won't even be called if the request isn't all
         * nicely aligned and of the right size, so there's no need
         * for us to check any of that. */

        inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

        /*
         * Any write past EOF is not allowed because we'd be extending.
         */
        if (create && (iblock + max_blocks) > inode_blocks) {
                ret = -EIO;
                goto bail;
        }

        /* This figures out the size of the next contiguous block, and
         * our logical offset */
        ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
                                          &contig_blocks, &ext_flags);
        if (ret) {
                mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
                     (unsigned long long)iblock);
                ret = -EIO;
                goto bail;
        }

        if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu has a hole at block %llu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)iblock);
                ret = -EROFS;
                goto bail;
        }

        /* We should already CoW the refcounted extent. */
        BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

        /*
         * get_more_blocks() expects us to describe a hole by clearing
         * the mapped bit on bh_result().
         *
         * Consider an unwritten extent as a hole.
         */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);
        else {
                /*
                 * ocfs2_prepare_inode_for_write() should have caught
                 * the case where we'd be filling a hole and triggered
                 * a buffered write instead.
                 */
                if (create) {
                        ret = -EIO;
                        mlog_errno(ret);
                        goto bail;
                }

                clear_buffer_mapped(bh_result);
        }

        /* make sure we don't map more than max_blocks blocks here as
           that's all the kernel will handle at this point. */
        if (max_blocks < contig_blocks)
                contig_blocks = max_blocks;
        bh_result->b_size = contig_blocks << blocksize_bits;
bail:
        return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
{
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int level;

        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

        ocfs2_iocb_clear_rw_locked(iocb);

        level = ocfs2_iocb_rw_locked_level(iocb);
        if (!level)
                up_read(&inode->i_alloc_sem);
        ocfs2_rw_unlock(inode, level);
}
/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

        jbd2_journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
        journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

        if (!page_has_buffers(page))
                return 0;
        return jbd2_journal_try_to_free_buffers(journal, page, wait);
}

static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
                               const struct iovec *iov,
                               loff_t offset,
                               unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
        int ret;

        mlog_entry_void();

        /*
         * Fallback to buffered I/O if we see an inode without
         * extents.
         */
        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;

        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
                                            nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);

        mlog_exit(ret);
        return ret;
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
                                            u32 cpos,
                                            unsigned int *start,
                                            unsigned int *end)
{
        unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

        if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
                unsigned int cpp;

                cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

                cluster_start = cpos % cpp;
                cluster_start = cluster_start << osb->s_clustersize_bits;

                cluster_end = cluster_start + osb->s_clustersize;
        }

        BUG_ON(cluster_start > PAGE_SIZE);
        BUG_ON(cluster_end > PAGE_SIZE);

        if (start)
                *start = cluster_start;
        if (end)
                *end = cluster_end;
}
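/*
 * A worked example of the boundary math above, assuming 64K pages
 * (PAGE_CACHE_SHIFT == 16) and 4K clusters (s_clustersize_bits == 12;
 * illustrative numbers only): cpp == 1 << (16 - 12) == 16 clusters per
 * page, so cpos == 35 lands in slot 35 % 16 == 3 of its page, giving
 * cluster_start == 3 << 12 == 12288 and cluster_end == 16384. When the
 * page is no larger than a cluster, the boundaries are simply the
 * whole page.
 */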
/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
                                     struct ocfs2_super *osb, u32 cpos,
                                     unsigned from, unsigned to)
{
        void *kaddr;
        unsigned int cluster_start, cluster_end;

        ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

        kaddr = kmap_atomic(page, KM_USER0);

        if (from || to) {
                if (from > cluster_start)
                        memset(kaddr + cluster_start, 0, from - cluster_start);
                if (to < cluster_end)
                        memset(kaddr + to, 0, cluster_end - to);
        } else {
                memset(kaddr + cluster_start, 0,
                       cluster_end - cluster_start);
        }

        kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
                                 unsigned int block_start)
{
        u64 offset = page_offset(page) + block_start;

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                return 1;

        if (i_size_read(inode) > offset)
                return 1;

        return 0;
}

/*
 * Some of this taken from block_prepare_write(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                          struct inode *inode, unsigned int from,
                          unsigned int to, int new)
{
        int ret = 0;
        struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
        unsigned int block_end, block_start;
        unsigned int bsize = 1 << inode->i_blkbits;

        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        head = page_buffers(page);
        for (bh = head, block_start = 0; bh != head || !block_start;
             bh = bh->b_this_page, block_start += bsize) {
                block_end = block_start + bsize;

                clear_buffer_new(bh);

                /*
                 * Ignore blocks outside of our i/o range -
                 * they may belong to unallocated clusters.
                 */
                if (block_start >= to || block_end <= from) {
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        continue;
                }

                /*
                 * For an allocating write with cluster size >= page
                 * size, we always write the entire page.
                 */
                if (new)
                        set_buffer_new(bh);

                if (!buffer_mapped(bh)) {
                        map_bh(bh, inode->i_sb, *p_blkno);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }

                if (PageUptodate(page)) {
                        if (!buffer_uptodate(bh))
                                set_buffer_uptodate(bh);
                } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                           !buffer_new(bh) &&
                           ocfs2_should_read_blk(inode, page, block_start) &&
                           (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++=bh;
                }

                *p_blkno = *p_blkno + 1;
        }

        /*
         * If we issued read requests - let them complete.
         */
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
                        ret = -EIO;
        }

        if (ret == 0 || !new)
                return ret;

        /*
         * If we get -EIO above, zero out any newly allocated blocks
         * to avoid exposing stale data.
         */
        bh = head;
        block_start = 0;
        do {
                block_end = block_start + bsize;
                if (block_end <= from)
                        goto next_bh;
                if (block_start >= to)
                        break;

                zero_user(page, block_start, bh->b_size);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);

next_bh:
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);

        return ret;
}
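/*
 * Note the read-in condition above: a block is only read from disk when
 * it straddles an i/o boundary (block_start < from || block_end > to)
 * and ocfs2_should_read_blk() says its contents matter. A partially
 * overwritten block must be read first so the bytes outside [from, to)
 * survive; blocks fully covered by the copy are skipped since they will
 * be completely overwritten anyway.
 */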
#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES    1
#else
#define OCFS2_MAX_CTXT_PAGES    (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE     (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
        u32             c_cpos;
        u32             c_phys;
        /*
         * Give this a unique field because c_phys eventually gets
         * filled.
         */
        unsigned        c_new;
        unsigned        c_unwritten;
        unsigned        c_needs_zero;
};

struct ocfs2_write_ctxt {
        /* Logical cluster position / len of write */
        u32                             w_cpos;
        u32                             w_clen;

        /* First cluster allocated in a nonsparse extend */
        u32                             w_first_new_cpos;

        struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

        /*
         * This is true if page_size > cluster_size.
         *
         * It triggers a set of special cases during write which might
         * have to deal with allocating writes to partial pages.
         */
        unsigned int                    w_large_pages;

        /*
         * Pages involved in this write.
         *
         * w_target_page is the page being written to by the user.
         *
         * w_pages is an array of pages which always contains
         * w_target_page, and in the case of an allocating write with
         * page_size < cluster size, it will contain zero'd and mapped
         * pages adjacent to w_target_page which need to be written
         * out so that future reads from that region will get
         * zeros.
         */
        struct page                     *w_pages[OCFS2_MAX_CTXT_PAGES];
        unsigned int                    w_num_pages;
        struct page                     *w_target_page;

        /*
         * ocfs2_write_end() uses this to know what the real range to
         * write in the target should be.
         */
        unsigned int                    w_target_from;
        unsigned int                    w_target_to;

        /*
         * We could use journal_current_handle() but this is cleaner,
         * IMHO -Mark
         */
        handle_t                        *w_handle;

        struct buffer_head              *w_di_bh;

        struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
        int i;

        for(i = 0; i < num_pages; i++) {
                if (pages[i]) {
                        unlock_page(pages[i]);
                        mark_page_accessed(pages[i]);
                        page_cache_release(pages[i]);
                }
        }
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);

        brelse(wc->w_di_bh);
        kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
                                  struct ocfs2_super *osb, loff_t pos,
                                  unsigned len, struct buffer_head *di_bh)
{
        u32 cend;
        struct ocfs2_write_ctxt *wc;

        wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
        if (!wc)
                return -ENOMEM;

        wc->w_cpos = pos >> osb->s_clustersize_bits;
        wc->w_first_new_cpos = UINT_MAX;
        cend = (pos + len - 1) >> osb->s_clustersize_bits;
        wc->w_clen = cend - wc->w_cpos + 1;
        get_bh(di_bh);
        wc->w_di_bh = di_bh;

        if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
                wc->w_large_pages = 1;
        else
                wc->w_large_pages = 0;

        ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

        *wcp = wc;

        return 0;
}
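/*
 * A worked example of the cluster math above, assuming 4K clusters
 * (s_clustersize_bits == 12; illustrative numbers only): a write of
 * len == 100 at pos == 8190 gives w_cpos == 8190 >> 12 == 1 and
 * cend == (8190 + 100 - 1) >> 12 == 2, so w_clen == 2 - 1 + 1 == 2;
 * the write straddles a cluster boundary and describes two clusters
 * in w_desc[].
 */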
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
        unsigned int block_start, block_end;
        struct buffer_head *head, *bh;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        block_start = 0;
        do {
                block_end = block_start + bh->b_size;

                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!PageUptodate(page)) {
                                        unsigned start, end;

                                        start = max(from, block_start);
                                        end = min(to, block_end);

                                        zero_user_segment(page, start, end);
                                        set_buffer_uptodate(bh);
                                }

                                clear_buffer_new(bh);
                                mark_buffer_dirty(bh);
                        }
                }

                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
                                struct ocfs2_write_ctxt *wc,
                                loff_t user_pos, unsigned user_len)
{
        int i;
        unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
                to = user_pos + user_len;
        struct page *tmppage;

        ocfs2_zero_new_buffers(wc->w_target_page, from, to);

        for(i = 0; i < wc->w_num_pages; i++) {
                tmppage = wc->w_pages[i];

                if (page_has_buffers(tmppage)) {
                        if (ocfs2_should_order_data(inode))
                                ocfs2_jbd2_file_inode(wc->w_handle, inode);

                        block_commit_write(tmppage, from, to);
                }
        }
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
                                        struct ocfs2_write_ctxt *wc,
                                        struct page *page, u32 cpos,
                                        loff_t user_pos, unsigned user_len,
                                        int new)
{
        int ret;
        unsigned int map_from = 0, map_to = 0;
        unsigned int cluster_start, cluster_end;
        unsigned int user_data_from = 0, user_data_to = 0;

        ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
                                        &cluster_start, &cluster_end);

        if (page == wc->w_target_page) {
                map_from = user_pos & (PAGE_CACHE_SIZE - 1);
                map_to = map_from + user_len;

                if (new)
                        ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                                    cluster_start, cluster_end,
                                                    new);
                else
                        ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                                    map_from, map_to, new);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                user_data_from = map_from;
                user_data_to = map_to;
                if (new) {
                        map_from = cluster_start;
                        map_to = cluster_end;
                }
        } else {
                /*
                 * If we haven't allocated the new page yet, we
                 * shouldn't be writing it out without copying user
                 * data. This is likely a math error from the caller.
                 */
                BUG_ON(!new);

                map_from = cluster_start;
                map_to = cluster_end;

                ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                            cluster_start, cluster_end, new);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Parts of newly allocated pages need to be zero'd.
         *
         * Above, we have also rewritten 'to' and 'from' - as far as
         * the rest of the function is concerned, the entire cluster
         * range inside of a page needs to be written.
         *
         * We can skip this if the page is up to date - it's already
         * been zero'd from being read in as a hole.
         */
        if (new && !PageUptodate(page))
                ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
                                         cpos, user_data_from, user_data_to);

        flush_dcache_page(page);

out:
        return ret;
}
/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                      struct ocfs2_write_ctxt *wc,
                                      u32 cpos, loff_t user_pos, int new,
                                      struct page *mmap_page)
{
        int ret = 0, i;
        unsigned long start, target_index, index;
        struct inode *inode = mapping->host;

        target_index = user_pos >> PAGE_CACHE_SHIFT;

        /*
         * Figure out how many pages we'll be manipulating here. For
         * a non-allocating write, we just change the one
         * page. Otherwise, we'll need a whole cluster's worth.
         */
        if (new) {
                wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
                start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
        } else {
                wc->w_num_pages = 1;
                start = target_index;
        }

        for(i = 0; i < wc->w_num_pages; i++) {
                index = start + i;

                if (index == target_index && mmap_page) {
                        /*
                         * ocfs2_pagemkwrite() is a little different
                         * and wants us to directly use the page
                         * passed in.
                         */
                        lock_page(mmap_page);

                        if (mmap_page->mapping != mapping) {
                                unlock_page(mmap_page);
                                /*
                                 * Sanity check - the locking in
                                 * ocfs2_pagemkwrite() should ensure
                                 * that this code doesn't trigger.
                                 */
                                ret = -EINVAL;
                                mlog_errno(ret);
                                goto out;
                        }

                        page_cache_get(mmap_page);
                        wc->w_pages[i] = mmap_page;
                } else {
                        wc->w_pages[i] = find_or_create_page(mapping, index,
                                                             GFP_NOFS);
                        if (!wc->w_pages[i]) {
                                ret = -ENOMEM;
                                mlog_errno(ret);
                                goto out;
                        }
                }

                if (index == target_index)
                        wc->w_target_page = wc->w_pages[i];
        }
out:
        return ret;
}
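/*
 * For illustration (assumed geometry, not a requirement): with 4K pages
 * and 64K clusters, an allocating write pins ocfs2_pages_per_cluster()
 * == 16 pages here, starting at the page index of the cluster head, so
 * the whole newly allocated cluster can be zeroed and written out. A
 * non-allocating write pins only the page at target_index.
 */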
/*
 * Prepare a single cluster and write it into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
                               u32 phys, unsigned int unwritten,
                               unsigned int should_zero,
                               struct ocfs2_alloc_context *data_ac,
                               struct ocfs2_alloc_context *meta_ac,
                               struct ocfs2_write_ctxt *wc, u32 cpos,
                               loff_t user_pos, unsigned user_len)
{
        int ret, i, new;
        u64 v_blkno, p_blkno;
        struct inode *inode = mapping->host;
        struct ocfs2_extent_tree et;

        new = phys == 0 ? 1 : 0;
        if (new) {
                u32 tmp_pos;

                /*
                 * This is safe to call with the page locks - it won't take
                 * any additional semaphores or cluster locks.
                 */
                tmp_pos = cpos;
                ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
                                           &tmp_pos, 1, 0, wc->w_di_bh,
                                           wc->w_handle, data_ac,
                                           meta_ac, NULL);
                /*
                 * This shouldn't happen because we must have already
                 * calculated the correct meta data allocation required. The
                 * internal tree allocation code should know how to increase
                 * transaction credits itself.
                 *
                 * If need be, we could handle -EAGAIN for a
                 * RESTART_TRANS here.
                 */
                mlog_bug_on_msg(ret == -EAGAIN,
                                "Inode %llu: EAGAIN return during allocation.\n",
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        } else if (unwritten) {
                ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
                                              wc->w_di_bh);
                ret = ocfs2_mark_extent_written(inode, &et,
                                                wc->w_handle, cpos, 1, phys,
                                                meta_ac, &wc->w_dealloc);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (should_zero)
                v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
        else
                v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;

        /*
         * The only reason this should fail is due to an inability to
         * find the extent added.
         */
        ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
                                          NULL);
        if (ret < 0) {
                ocfs2_error(inode->i_sb, "Corrupting extent for inode %llu, "
                            "at logical block %llu",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)v_blkno);
                goto out;
        }

        BUG_ON(p_blkno == 0);

        for(i = 0; i < wc->w_num_pages; i++) {
                int tmpret;

                tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
                                                      wc->w_pages[i], cpos,
                                                      user_pos, user_len,
                                                      should_zero);
                if (tmpret) {
                        mlog_errno(tmpret);
                        if (ret == 0)
                                ret = tmpret;
                }
        }

        /*
         * We only have cleanup to do in case of allocating write.
         */
        if (ret && new)
                ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

        return ret;
}
static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
                                       struct ocfs2_alloc_context *data_ac,
                                       struct ocfs2_alloc_context *meta_ac,
                                       struct ocfs2_write_ctxt *wc,
                                       loff_t pos, unsigned len)
{
        int ret, i;
        loff_t cluster_off;
        unsigned int local_len = len;
        struct ocfs2_write_cluster_desc *desc;
        struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

        for (i = 0; i < wc->w_clen; i++) {
                desc = &wc->w_desc[i];

                /*
                 * We have to make sure that the total write passed in
                 * doesn't extend past a single cluster.
                 */
                local_len = len;
                cluster_off = pos & (osb->s_clustersize - 1);
                if ((cluster_off + local_len) > osb->s_clustersize)
                        local_len = osb->s_clustersize - cluster_off;

                ret = ocfs2_write_cluster(mapping, desc->c_phys,
                                          desc->c_unwritten,
                                          desc->c_needs_zero,
                                          data_ac, meta_ac,
                                          wc, desc->c_cpos, pos, local_len);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                len -= local_len;
                pos += local_len;
        }

        ret = 0;
out:
        return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
                                        struct ocfs2_write_ctxt *wc,
                                        loff_t pos, unsigned len, int alloc)
{
        struct ocfs2_write_cluster_desc *desc;

        wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
        wc->w_target_to = wc->w_target_from + len;

        if (alloc == 0)
                return;

        /*
         * Allocating write - we may have different boundaries based
         * on page size and cluster size.
         *
         * NOTE: We can no longer compute one value from the other as
         * the actual write length and user provided length may be
         * different.
         */

        if (wc->w_large_pages) {
                /*
                 * We only care about the 1st and last cluster within
                 * our range and whether they should be zero'd or not. Either
                 * value may be extended out to the start/end of a
                 * newly allocated cluster.
                 */
                desc = &wc->w_desc[0];
                if (desc->c_needs_zero)
                        ocfs2_figure_cluster_boundaries(osb,
                                                        desc->c_cpos,
                                                        &wc->w_target_from,
                                                        NULL);

                desc = &wc->w_desc[wc->w_clen - 1];
                if (desc->c_needs_zero)
                        ocfs2_figure_cluster_boundaries(osb,
                                                        desc->c_cpos,
                                                        NULL,
                                                        &wc->w_target_to);
        } else {
                wc->w_target_from = 0;
                wc->w_target_to = PAGE_CACHE_SIZE;
        }
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
                                     struct ocfs2_write_ctxt *wc,
                                     unsigned int *clusters_to_alloc,
                                     unsigned int *extents_to_split)
{
        int ret;
        struct ocfs2_write_cluster_desc *desc;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;
        u32 phys = 0;
        int i;

        *clusters_to_alloc = 0;
        *extents_to_split = 0;

        for (i = 0; i < wc->w_clen; i++) {
                desc = &wc->w_desc[i];
                desc->c_cpos = wc->w_cpos + i;

                if (num_clusters == 0) {
                        /*
                         * Need to look up the next extent record.
                         */
                        ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
                                                 &num_clusters, &ext_flags);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        /* We should already CoW the refcounted extent. */
                        BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

                        /*
                         * Assume worst case - that we're writing in
                         * the middle of the extent.
                         *
                         * We can assume that the write proceeds from
                         * left to right, in which case the extent
                         * insert code is smart enough to coalesce the
                         * next splits into the previous records created.
                         */
                        if (ext_flags & OCFS2_EXT_UNWRITTEN)
                                *extents_to_split = *extents_to_split + 2;
                } else if (phys) {
                        /*
                         * Only increment phys if it doesn't describe
                         * a hole.
                         */
                        phys++;
                }

                /*
                 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
                 * file that got extended.  w_first_new_cpos tells us
                 * where the newly allocated clusters are so we can
                 * zero them.
                 */
                if (desc->c_cpos >= wc->w_first_new_cpos) {
                        BUG_ON(phys == 0);
                        desc->c_needs_zero = 1;
                }

                desc->c_phys = phys;
                if (phys == 0) {
                        desc->c_new = 1;
                        desc->c_needs_zero = 1;
                        *clusters_to_alloc = *clusters_to_alloc + 1;
                }

                if (ext_flags & OCFS2_EXT_UNWRITTEN) {
                        desc->c_unwritten = 1;
                        desc->c_needs_zero = 1;
                }

                num_clusters--;
        }

        ret = 0;
out:
        return ret;
}
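/*
 * The "+ 2" above is the worst case for a write landing in the middle
 * of an unwritten extent record: the record may have to be split into
 * an unwritten left part, a written middle, and an unwritten right
 * part - that is, up to two extent records beyond the one that already
 * exists.
 */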
static int ocfs2_write_begin_inline(struct address_space *mapping,
                                    struct inode *inode,
                                    struct ocfs2_write_ctxt *wc)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct page *page;
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

        page = find_or_create_page(mapping, 0, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }
        /*
         * If we don't set w_num_pages then this page won't get unlocked
         * and freed on cleanup of the write context.
         */
        wc->w_pages[0] = wc->w_target_page = page;
        wc->w_num_pages = 1;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                ocfs2_commit_trans(osb, handle);

                mlog_errno(ret);
                goto out;
        }

        if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                ocfs2_set_inode_data_inline(inode, di);

        if (!PageUptodate(page)) {
                ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
                if (ret) {
                        ocfs2_commit_trans(osb, handle);

                        goto out;
                }
        }

        wc->w_handle = handle;
out:
        return ret;
}

int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
                return 1;
        return 0;
}

static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
                                          struct inode *inode, loff_t pos,
                                          unsigned len, struct page *mmap_page,
                                          struct ocfs2_write_ctxt *wc)
{
        int ret, written = 0;
        loff_t end = pos + len;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_dinode *di = NULL;

        mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
             (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
             oi->ip_dyn_features);

        /*
         * Handle inodes which already have inline data first.
         */
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                if (mmap_page == NULL &&
                    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
                        goto do_inline_write;

                /*
                 * The write won't fit - we have to give this inode an
                 * extent list now.
                 */
                ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
                if (ret)
                        mlog_errno(ret);
                goto out;
        }

        /*
         * Check whether the inode can accept inline data.
         */
        if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
                return 0;

        /*
         * Check whether the write can fit.
         */
        di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
        if (mmap_page ||
            end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
                return 0;

do_inline_write:
        ret = ocfs2_write_begin_inline(mapping, inode, wc);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * This signals to the caller that the data can be written
         * inline.
         */
        written = 1;
out:
        return written ? written : ret;
}
/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
                                        unsigned len,
                                        struct ocfs2_write_ctxt *wc)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t newsize = pos + len;

        if (ocfs2_sparse_alloc(osb))
                return 0;

        if (newsize <= i_size_read(inode))
                return 0;

        ret = ocfs2_extend_no_holes(inode, newsize, pos);
        if (ret)
                mlog_errno(ret);

        wc->w_first_new_cpos =
                ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

        return ret;
}
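/*
 * For illustration (assumed sizes only): with i_size == 1M and a 100
 * byte write at pos == 3M on a nonsparse volume, the call above fills
 * in everything from the old EOF through pos + len up front.
 * w_first_new_cpos then records the first cluster past the old 1M EOF,
 * and every descriptor at or beyond it gets c_needs_zero set in
 * ocfs2_populate_write_desc() so the freshly allocated region is zeroed
 * rather than left with stale disk contents.
 */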
int ocfs2_write_begin_nolock(struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata,
                             struct buffer_head *di_bh, struct page *mmap_page)
{
        int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
        unsigned int clusters_to_alloc, extents_to_split;
        struct ocfs2_write_ctxt *wc;
        struct inode *inode = mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        handle_t *handle;
        struct ocfs2_extent_tree et;

        ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        if (ocfs2_supports_inline_data(osb)) {
                ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
                                                     mmap_page, wc);
                if (ret == 1) {
                        ret = 0;
                        goto success;
                }
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_check_range_for_refcount(inode, pos, len);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        } else if (ret == 1) {
                ret = ocfs2_refcount_cow(inode, di_bh,
                                         wc->w_cpos, wc->w_clen);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
                                        &extents_to_split);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

        /*
         * We set w_target_from, w_target_to here so that
         * ocfs2_write_end() knows which range in the target page to
         * write out. An allocation requires that we write the entire
         * cluster range.
         */
        if (clusters_to_alloc || extents_to_split) {
                /*
                 * XXX: We are stretching the limits of
                 * ocfs2_lock_allocators(). It greatly over-estimates
                 * the work to be done.
                 */
                mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u,"
                     " clusters_to_add = %u, extents_to_split = %u\n",
                     (unsigned long long)OCFS2_I(inode)->ip_blkno,
                     (long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
                     clusters_to_alloc, extents_to_split);

                ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
                                              wc->w_di_bh);
                ret = ocfs2_lock_allocators(inode, &et,
                                            clusters_to_alloc, extents_to_split,
                                            &data_ac, &meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                credits = ocfs2_calc_extend_credits(inode->i_sb,
                                                    &di->id2.i_list,
                                                    clusters_to_alloc);

        }

        /*
         * We have to zero sparse allocated clusters, unwritten extent
         * clusters, and non-sparse clusters we just extended.  For
         * non-sparse writes, we know zeros will only be needed in the
         * first and/or last cluster.
         */
        if (clusters_to_alloc || extents_to_split ||
            (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
                            wc->w_desc[wc->w_clen - 1].c_needs_zero)))
                cluster_of_pages = 1;
        else
                cluster_of_pages = 0;

        ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        wc->w_handle = handle;

        if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) {
                ret = -EDQUOT;
                goto out_commit;
        }

        /*
         * We don't want this to fail in ocfs2_write_end(), so do it
         * here.
         */
        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_quota;
        }

        /*
         * Fill our page array first. That way we've grabbed enough so
         * that we can zero and flush if we error after adding the
         * extent.
         */
        ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
                                         cluster_of_pages, mmap_page);
        if (ret) {
                mlog_errno(ret);
                goto out_quota;
        }

        ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
                                          len);
        if (ret) {
                mlog_errno(ret);
                goto out_quota;
        }

        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);

success:
        *pagep = wc->w_target_page;
        *fsdata = wc;
        return 0;
out_quota:
        if (clusters_to_alloc)
                vfs_dq_free_space(inode,
                          ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
        ocfs2_commit_trans(osb, handle);

out:
        ocfs2_free_write_ctxt(wc);

        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);
        return ret;
}
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        struct inode *inode = mapping->host;

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        /*
         * Take alloc sem here to prevent concurrent lookups. That way
         * the mapping, zeroing and tree manipulation within
         * ocfs2_write() will be safe against ->readpage(). This
         * should also serve to lock out allocation from a shared
         * writeable region.
         */
        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ret = ocfs2_write_begin_nolock(mapping, pos, len, flags, pagep,
                                       fsdata, di_bh, NULL);
        if (ret) {
                mlog_errno(ret);
                goto out_fail;
        }

        brelse(di_bh);

        return 0;

out_fail:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);

        return ret;
}
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
                                   unsigned len, unsigned *copied,
                                   struct ocfs2_dinode *di,
                                   struct ocfs2_write_ctxt *wc)
{
        void *kaddr;

        if (unlikely(*copied < len)) {
                if (!PageUptodate(wc->w_target_page)) {
                        *copied = 0;
                        return;
                }
        }

        kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
        memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
        kunmap_atomic(kaddr, KM_USER0);

        mlog(0, "Data written to inode at offset %llu. "
             "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
             (unsigned long long)pos, *copied,
             le16_to_cpu(di->id2.i_data.id_count),
             le16_to_cpu(di->i_dyn_features));
}

int ocfs2_write_end_nolock(struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        int i;
        unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
        struct inode *inode = mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_write_ctxt *wc = fsdata;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
        handle_t *handle = wc->w_handle;
        struct page *tmppage;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
                goto out_write_size;
        }

        if (unlikely(copied < len)) {
                if (!PageUptodate(wc->w_target_page))
                        copied = 0;

                ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
                                       start+len);
        }
        flush_dcache_page(wc->w_target_page);

        for(i = 0; i < wc->w_num_pages; i++) {
                tmppage = wc->w_pages[i];

                if (tmppage == wc->w_target_page) {
                        from = wc->w_target_from;
                        to = wc->w_target_to;

                        BUG_ON(from > PAGE_CACHE_SIZE ||
                               to > PAGE_CACHE_SIZE ||
                               to < from);
                } else {
                        /*
                         * Pages adjacent to the target (if any) imply
                         * a hole-filling write in which case we want
                         * to flush their entire range.
                         */
                        from = 0;
                        to = PAGE_CACHE_SIZE;
                }

                if (page_has_buffers(tmppage)) {
                        if (ocfs2_should_order_data(inode))
                                ocfs2_jbd2_file_inode(wc->w_handle, inode);
                        block_commit_write(tmppage, from, to);
                }
        }

out_write_size:
        pos += copied;
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        di->i_size = cpu_to_le64((u64)i_size_read(inode));
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
        di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        ocfs2_journal_dirty(handle, wc->w_di_bh);

        ocfs2_commit_trans(osb, handle);

        ocfs2_run_deallocs(osb, &wc->w_dealloc);

        ocfs2_free_write_ctxt(wc);

        return copied;
}

static int ocfs2_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        int ret;
        struct inode *inode = mapping->host;

        ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        ocfs2_inode_unlock(inode, 1);

        return ret;
}

const struct address_space_operations ocfs2_aops = {
        .readpage               = ocfs2_readpage,
        .readpages              = ocfs2_readpages,
        .writepage              = ocfs2_writepage,
        .write_begin            = ocfs2_write_begin,
        .write_end              = ocfs2_write_end,
        .bmap                   = ocfs2_bmap,
        .sync_page              = block_sync_page,
        .direct_IO              = ocfs2_direct_IO,
        .invalidatepage         = ocfs2_invalidatepage,
        .releasepage            = ocfs2_releasepage,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
};