scrub.c revision 27f9f02357f2bff96fc5e8a000c78ec5f96d42af
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
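/*
 * Illustrative arithmetic only (added commentary, not part of the original
 * file): the byte figures in the comments above assume PAGE_SIZE == 4096.
 * Under that assumption the limits multiply out as follows:
 *
 *	SCRUB_PAGES_PER_RD_BIO * PAGE_SIZE    = 32 * 4k   = 128k per read bio
 *	SCRUB_PAGES_PER_WR_BIO * PAGE_SIZE    = 32 * 4k   = 128k per write bio
 *	SCRUB_BIOS_PER_SCTX * 128k            = 64 * 128k = 8MB in flight
 *	SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 4k   = 64k max block
 *
 * On architectures with larger pages these byte figures grow accordingly;
 * only the page counts are fixed.
 */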
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

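/*
 * Sketch of how these structures relate (added commentary, not from the
 * original file). A scrub_block groups the pages of one checksummed unit
 * (a data sector or a tree block); the pages of several blocks are packed
 * into one scrub_bio for submission; the scrub_ctx owns a fixed pool of
 * SCRUB_BIOS_PER_SCTX bios per scrubbed device:
 *
 *	scrub_ctx
 *	  +-- bios[0..63]          (struct scrub_bio, free list via next_free)
 *	  |     +-- pagev[0..31]   (struct scrub_page pointers, pinned
 *	  |                         via the get/put helpers below)
 *	  +-- wr_ctx.wr_curr_bio   (write side, dev-replace only)
 *
 *	scrub_block
 *	  +-- pagev[0..15]         (struct scrub_page, one per PAGE_SIZE)
 *
 * Both scrub_block and scrub_page are reference counted, so a page can
 * outlive its block while it still sits in a bio.
 */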
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      void *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

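/*
 * Added commentary (not from the original file): the two helpers above form
 * a simple in-flight counter. Every submission path in this file pairs them
 * like this, and waiters block on list_wait until the count drains:
 *
 *	scrub_pending_bio_inc(sctx);
 *	btrfsic_submit_bio(READ, sbio->bio);
 *	...
 *	// in the bio completion worker:
 *	scrub_pending_bio_dec(sctx);
 *
 *	// elsewhere, to drain all outstanding I/O:
 *	wait_event(sctx->list_wait,
 *		   atomic_read(&sctx->bios_in_flight) == 0);
 */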
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

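/*
 * Added commentary (not from the original file): after the setup loop above,
 * the 64 preallocated scrub_bios form a singly linked free list threaded
 * through their next_free indices:
 *
 *	first_free = 0
 *	bios[0].next_free = 1, bios[1].next_free = 2, ...
 *	bios[63].next_free = -1		(end of list)
 *
 * scrub_add_page_to_rd_bio() later pops entries from this list under
 * list_lock, and completed bios are threaded back in the same way, so -1
 * consistently means "none available".
 */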
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

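/*
 * Added commentary (not from the original file): scrub_print_warning_inode()
 * is an iterate_extent_inodes() callback. The contract for such callbacks,
 * as used in this file, looks like:
 *
 *	// called once per (inode, offset, root) that references the extent
 *	static int my_callback(u64 inum, u64 offset, u64 root, void *ctx);
 *	//   return 0  - continue with the next inode reference
 *	//   return 1  - stop the iteration early
 *	//   return <0 - abort with an error
 *
 * This function always returns 0 because a warning should be printed for
 * every affected file, even when path resolution fails for one of them.
 */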
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

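/*
 * Added commentary (not from the original file): the EXTENT_DAMAGED round
 * trip above works because the read path clears the bit once a good copy
 * has been read and written back. Schematically:
 *
 *	set_extent_bits(io_tree, offset, end, EXTENT_DAMAGED, GFP_NOFS);
 *	extent_read_full_page(io_tree, page, btrfs_get_extent, mirror_num);
 *	wait_on_page_locked(page);
 *	// repaired iff the bit is gone again:
 *	corrected = !test_range_bit(io_tree, offset, end, EXTENT_DAMAGED,
 *				    0, NULL);
 *
 * If the bit survives, the damage could not be repaired, and it is cleared
 * manually so it does not linger in the tree.
 */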
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copy number (mirror_num) of the failed sector. thus,
	 * that readpage is expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once the read has finished) and rewrite the failed sector if a
	 * good copy can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (which is what caused
	 * this fixup code to be called) another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the second page
	 * of the second mirror can be repaired by copying the contents of
	 * the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the latter two cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly && !sctx->is_dev_replace)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from.
				 * scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report on whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least this
	 * avoids overwriting the good copy.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

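/*
 * Added commentary (not from the original file): sblocks_for_recheck is a
 * plain array indexed by mirror, so the failed copy and its alternatives
 * line up page by page:
 *
 *	struct scrub_block *sblocks_for_recheck;  // BTRFS_MAX_MIRRORS entries
 *	sblock_bad   = sblocks_for_recheck + failed_mirror_index;
 *	sblock_other = sblocks_for_recheck + mirror_index;
 *	// page_num selects the same PAGE_SIZE slice in every mirror:
 *	sblock_other->pagev[page_num]  vs.  sblock_bad->pagev[page_num]
 *
 * Entries beyond the number of actually mapped mirrors keep
 * page_count == 0, which is what terminates the mirror loops above.
 */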
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on-disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

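/*
 * Added commentary (not from the original file): the loop above uses the
 * classic synchronous-bio idiom of this kernel generation: a one-page bio
 * whose end_io callback just fires an on-stack completion:
 *
 *	DECLARE_COMPLETION_ONSTACK(complete);
 *	bio->bi_end_io = scrub_complete_bio_end_io;	// calls complete()
 *	bio->bi_private = &complete;
 *	btrfsic_submit_bio(READ, bio);
 *	wait_for_completion(&complete);			// sleep until done
 *	ok = test_bit(BIO_UPTODATE, &bio->bi_flags);
 *
 * Reading page by page is deliberately slow: it tells us exactly which
 * PAGE_SIZE slice of a mirror failed, which the repair code needs.
 */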
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

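/*
 * Added commentary (not from the original file): because a block can span
 * several non-contiguous pages, the checksum is fed to the CRC
 * incrementally, one kmapped page at a time. The general shape, for a data
 * block, is:
 *
 *	u32 crc = ~(u32)0;			// seed
 *	for each page in the block {
 *		buf = kmap_atomic(page);
 *		crc = btrfs_csum_data(buf, crc, PAGE_SIZE);
 *		kunmap_atomic(buf);
 *	}
 *	btrfs_csum_final(crc, calculated_csum);	// invert and store
 *
 * For metadata, the first BTRFS_CSUM_SIZE bytes of the block hold the
 * stored checksum itself and are skipped, which is the special case for
 * page_num == 0 above.
 */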
static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING
				"btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

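/*
 * Added commentary (not from the original file): pages are appended to the
 * current write bio only while they stay contiguous in both address spaces;
 * any gap flushes the bio and starts a new one. The contiguity test above
 * boils down to:
 *
 *	next_physical = sbio->physical + sbio->page_count * PAGE_SIZE;
 *	next_logical  = sbio->logical  + sbio->page_count * PAGE_SIZE;
 *	if (next_physical != spage->physical_for_dev_replace ||
 *	    next_logical  != spage->logical)
 *		scrub_wr_submit(sctx);	// flush, then retry with a fresh bio
 *
 * So a run of adjacent pages becomes one large sequential write on the
 * dev-replace target instead of many single-page writes.
 */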
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/*
	 * process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5
	 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	sbio->work.func = scrub_wr_bio_end_io_worker;
	btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	WARN_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

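/*
 * Added commentary (not from the original file): the get/put pairs above
 * implement a two-level lifetime. A typical sequence for the read path is:
 *
 *	atomic_set(&sblock->ref_count, 1);	// scrub_pages(): local ref
 *	scrub_page_get(spage);			// block's ref on each page
 *	scrub_block_get(sblock);		// one ref per page in a bio
 *	...
 *	scrub_block_put(sblock);		// scrub_pages() drops local ref
 *	scrub_block_put(sblock);		// bio completion drops the rest
 *
 * The last scrub_block_put() frees the block and drops its page refs; a
 * page that still sits in a write bio survives until that bio's own
 * scrub_page_put(), so neither level can be freed under the other.
 */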
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}
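
/*
 * Reference counting sketch for the structures built below: scrub_pages()
 * creates one scrub_block holding one ref for the duration of the function,
 * and each page queued into a bio takes an extra block ref and bumps
 * outstanding_pages.  The bio completion worker drops those per-page refs
 * again, so whichever side finishes last (this function or the completion
 * of the last page) ends up freeing the block.
 */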
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen) {
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case: a block with a checksum error
		 * is written out via the repair mechanism; a block that
		 * checks out fine is copied to the replacement target here.
		 */
		if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}
}
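
/*
 * Look up the checksum for the sector at @logical in sctx->csum_list.
 * The list is ordered and consumed front to back: sums that end before
 * @logical can never match again and are discarded (counted as
 * csum_discards), and a fully consumed btrfs_ordered_sum is freed once
 * its last sector has been handed out.
 */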
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
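
/*
 * Stripe geometry used below, illustrated for RAID10 (illustrative
 * numbers, not taken from a real chunk): with num_stripes = 4 and
 * sub_stripes = 2 there are factor = 2 mirror pairs, so the stripe
 * owned by device slot num starts at offset stripe_len * (num / 2),
 * advances by increment = stripe_len * 2 per iteration, and is read
 * as mirror_num = num % 2 + 1.  RAID0 is the same with no mirrors and
 * increment = stripe_len * num_stripes; RAID1 and DUP walk every
 * stripe with increment = stripe_len.
 */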
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		if (num >= nr_data_stripes(map)) {
			return 0;
		}
	}

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree
	 * and wait for completion. During readahead, the scrub is
	 * officially paused to not hold off transaction commits.
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB.
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	logic_end = logical + increment * nstripes;
	ret = 0;
	while (logical < logic_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       key.objectid, logical);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						       logical + map->stripe_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logical += increment;
				physical += map->stripe_len;

				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (logical >= logic_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
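
/*
 * A device extent only names the chunk it belongs to; scrub_chunk() looks
 * the chunk mapping up again and scrubs exactly those stripes of the chunk
 * that live on the scrubbed device at the given physical offset.
 */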
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);

		mutex_lock(&fs_info->scrub_lock);
		while (atomic_read(&fs_info->scrub_pause_req)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
			mutex_lock(&fs_info->scrub_lock);
		}
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
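
/*
 * Scrub all super block copies on the device.  btrfs keeps up to
 * BTRFS_SUPER_MIRROR_MAX copies per device (the primary at 64KiB, the
 * mirrors at larger, exponentially growing offsets returned by
 * btrfs_sb_offset()); copies that would lie beyond the end of a small
 * device are simply skipped.
 */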
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference on fs_info->scrub_workers; start the workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
					   &fs_info->generic_worker);
		else
			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
					   fs_info->thread_pool_size,
					   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
				   "scrubwrc",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
		ret = btrfs_start_workers(
				&fs_info->scrub_wr_completion_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
				   &fs_info->generic_worker);
		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_stop_workers(&fs_info->scrub_workers);
		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
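
/*
 * Main entry point for scrub and dev-replace.  Before any I/O is issued
 * it validates the size assumptions hard-coded above (nodesize == leafsize,
 * nodesize <= BTRFS_STRIPE_LEN, sectorsize == PAGE_SIZE, and both small
 * enough for the pagev array), then takes the device list, scrub and
 * dev-replace locks in order to guarantee a single scrub per device.
 */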
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * The way scrub is implemented, it cannot calculate the
		 * checksum in this case. Do not handle this situation at
		 * all, because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		return ret;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);
	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (!is_dev_replace) {
		down_read(&fs_info->scrub_super_lock);
		ret = scrub_supers(sctx, dev);
		up_read(&fs_info->scrub_super_lock);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);
	scrub_workers_put(fs_info);

	return ret;
}
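
/*
 * Pause protocol: a transaction commit raises scrub_pause_req and waits
 * until every running scrub has parked itself (scrubs_paused equals
 * scrubs_running); btrfs_scrub_continue() drops the request again and
 * wakes the parked scrubs.
 */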
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
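
/*
 * In dev-replace mode reads are remapped through btrfs_map_block() to the
 * first stripe of the extent's mapping (whatever device and physical
 * offset that resolves to); on any lookup problem the out parameters are
 * left untouched, so the original location is simply kept.
 */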
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}

static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	nocow_ctx->work.func = copy_nocow_pages_worker;
	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
			   &nocow_ctx->work);

	return 0;
}
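
/*
 * Worker for the nocow copy path.  Data without checksums (nodatasum
 * files) cannot be verified by the normal scrub read path, so it is
 * copied to the replacement target through the page cache instead: the
 * worker joins the running transaction, walks every inode that references
 * the logical address via iterate_inodes_from_logical(), and lets the
 * per-inode callback do the copying.  Any failure here is accounted as
 * an uncorrectable read error on the dev-replace target.
 */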
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  copy_nocow_pages_for_inode,
					  nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
			logical, physical_for_dev_replace, len,
			(unsigned long long)mirror_num, ret);
		not_written = 1;
		goto out;
	}

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
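
/*
 * Called once per (inode, offset, root) triple that references the extent.
 * The pages are pulled through the page cache, read from the chosen mirror
 * if they are not already uptodate, and then written synchronously to the
 * replacement target with write_page_nocow().
 */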
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	u64 physical_for_dev_replace;
	u64 len;
	unsigned long index;
	int srcu_index;
	int ret;
	int err;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	if (btrfs_root_refs(&local_root->root_item) == 0) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return -ENOENT;
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	ret = 0;
	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	len = nocow_ctx->len;
	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			pr_err("find_or_create_page() failed\n");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(&BTRFS_I(inode)->io_tree,
						    page, btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data in it is meaningless: it may be stale,
			 * and the new data may have been written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}

static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_private = &compl;
	bio->bi_end_io = scrub_complete_bio_end_io;
	bio->bi_size = 0;
	bio->bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}
	btrfsic_submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&compl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}