scrub.c revision 34f5c8e90b3f002672cd6b4e6e7c5b959fd981ae
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"
#include "rcu-string.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_CTX	16	/* 1 MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

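/*
 * Assuming the common 4 KiB PAGE_SIZE, the limits above work out as follows:
 * SCRUB_PAGES_PER_BIO * PAGE_SIZE = 16 * 4 KiB = 64 KiB per bio, and
 * SCRUB_BIOS_PER_CTX bios * 64 KiB = 1 MiB of read I/O in flight per device.
 * SCRUB_MAX_PAGES_PER_BLOCK * 4 KiB = 64 KiB likewise bounds the largest
 * node/leaf/sector size that a single scrub_block can describe.
 */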
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_CTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

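/*
 * A note on object lifetimes (summarizing the code below): scrub_page and
 * scrub_block are both reference counted. A scrub_block holds a reference
 * on each of its pages, and every page queued into a scrub_bio takes an
 * additional reference on its block (see scrub_pages() and
 * scrub_add_page_to_bio()), so the memory stays around until the last bio
 * completion has dropped its reference.
 */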
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);


static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->pages_per_bio = pages_per_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_CTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->in_flight, 0);
	atomic_set(&sctx->fixup_cnt, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

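/*
 * Callback for iterate_extent_inodes() (used by scrub_print_warning()
 * below): resolves one inode that references the errored extent and prints
 * a warning line per path under which the inode is reachable.
 */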
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

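/*
 * Print a warning describing what is stored at the errored logical address:
 * for tree blocks, the owning tree and level of each backref; for data
 * extents, each referencing inode with its path(s), via the callback above.
 */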
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

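/*
 * Callback for iterate_inodes_from_logical() (used by
 * scrub_fixup_nodatasum() below): re-reads one page of the affected inode
 * from the bad mirror so that the regular read path can repair it, or, if
 * the page is already uptodate and clean, rewrites the bad sector directly
 * from memory via repair_io_failure().
 */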
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * In the meantime, we treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

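/*
 * Work handler that repairs a data extent which carries no checksum
 * (nodatasum): it triggers regular reads from the bad mirror through the
 * page cache (see the comment in the function body), letting the generic
 * read path rewrite the sector from a good copy. The worker accounts
 * itself as a paused scrub; see the comment near the end for why.
 */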
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);

		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical,
			rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see caller why we're pretending to be paused in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, &fs_info->mapping_tree, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transaction commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all practical matters. effectively, we only
		 * avoid cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sctx->fixup_cnt);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report on whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds.
	 * For example, when the 2nd page of mirror #1 faces I/O errors,
	 * and the 2nd page of mirror #2 is readable but the final
	 * checksum test fails, then the 2nd page of mirror #3 could be
	 * tried, to see whether the final checksum then succeeds. But
	 * this would be a rare exception and is therefore not
	 * implemented. At least it avoids overwriting the good copy.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical,
				rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical,
			rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

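/*
 * Prepare one scrub_block per mirror for the recheck procedure: map the
 * errored logical range page by page via btrfs_map_block() and allocate a
 * page for each mirror's copy, so that every mirror can be re-read and
 * compared independently.
 */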
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

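/*
 * Verify a block that was read back without I/O errors: recompute the
 * checksum over all pages and compare it against the known csum (for data)
 * or the csum stored in the header (for metadata). For metadata, also
 * check bytenr, fsid, chunk tree uuid and generation, setting
 * header_error/generation_error accordingly.
 */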
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != le64_to_cpu(h->generation)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

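/*
 * Copy pages from a good mirror over the bad one. With force_write set,
 * every page is rewritten; otherwise only pages with an I/O, header or
 * checksum error are. The per-page helper below performs the actual
 * synchronous write and returns -EIO if the rewrite fails.
 */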
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

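/*
 * Verify a block that was read without I/O errors, dispatching on the
 * extent flags to the data, tree block or super block checksum routine.
 * If verification fails, hand the block to scrub_handle_errored_block()
 * for repair. Super block errors are only counted, not repaired, which is
 * why that return value is deliberately discarded here.
 */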
static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sctx->dev_root;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(root, buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr))
		++fail;

	if (sblock->pagev[0]->generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	BUG_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != le64_to_cpu(s->bytenr))
		++fail_cor;

	if (sblock->pagev[0]->generation != le64_to_cpu(s->generation))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will be rewritten with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

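/*
 * Submit the bio that is currently being filled, if any. in_flight is
 * incremented here and dropped again in scrub_bio_end_io_worker() once
 * the bio has been processed and recycled onto the free list.
 */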
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	atomic_inc(&sctx->in_flight);

	btrfsic_submit_bio(READ, sbio->bio);
}

static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_bio)
		scrub_submit(sctx);

	return 0;
}

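/*
 * Create a scrub_block for the range [logical, logical + len), allocate
 * one scrub_page (plus a backing page) per PAGE_SIZE of it, and queue all
 * pages for reading. With force set, the current bio is submitted right
 * away instead of waiting for more pages to batch up.
 */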
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

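/*
 * Worker run after bio completion (queued from scrub_bio_end_io() above):
 * mark all pages of a failed bio as errored, complete the scrub_blocks
 * whose last outstanding page finished, then recycle the scrub_bio onto
 * the free list and wake up anyone waiting for a free slot.
 */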
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);
	atomic_dec(&sctx->in_flight);
	wake_up(&sctx->list_wait);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}

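/*
 * Look up the data checksum for the sector at 'logical' in the pre-fetched
 * csum_list. Sums that end at or before 'logical' are stale and freed on
 * the way (counted as csum_discards). Returns 1 and copies the csum if
 * found, 0 otherwise.
 */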
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sctx->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		BUG_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

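/*
 * Scrub the part of one chunk that lives in stripe 'num' on the given
 * device: derive the starting offset, the per-iteration logical increment
 * and the mirror number from the raid layout, then walk the commit root of
 * the extent tree stripe by stripe and scrub every extent found.
 * For example, for RAID0 the device holding stripe 'num' sees the logical
 * addresses base + num * stripe_len + k * num_stripes * stripe_len, which
 * is exactly what offset and increment are set to below.
 */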
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sctx);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sctx->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sctx, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   scrub_dev, flags, generation,
					   mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sctx);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
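
/*
 * A dev extent is the per-device slice of a chunk. scrub_chunk() looks
 * up the chunk mapping that a dev extent belongs to and scrubs the
 * stripe whose device and physical offset match the dev extent; the
 * stripe index is what later selects the mirror to verify.
 */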
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
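
/*
 * Walk all dev extent items of the device under scrub (keyed by its
 * devid in the device tree) that overlap [start, end) and scrub the
 * matching part of each corresponding chunk. Like scrub_stripe(), this
 * works on the commit root, so the walk needs no tree locking.
 */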
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
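
/*
 * The super block and its copies are not referenced by extent items,
 * so they are scrubbed separately. btrfs_sb_offset() returns the fixed
 * locations of the mirrors (currently 64K, 64M and 256G); copies that
 * would lie beyond the end of the device are skipped. The generation
 * to verify against is that of the last committed transaction.
 */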
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
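
/*
 * Main entry point: scrub the byte range [start, end) of one device.
 * The checks up front reject configurations that the current
 * implementation cannot handle. The device_list_mutex/scrub_lock
 * sequence that follows ensures the device cannot go away and that at
 * most one scrub context exists per device at any time.
 */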
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_ctx *sctx;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->nodesize != root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       root->nodesize, root->leafsize);
		return -EINVAL;
	}

	if (root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum
		 * the way it is currently implemented. Do not handle this
		 * situation at all because it is not expected to ever
		 * happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
		       root->sectorsize, (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sctx = scrub_setup_ctx(dev);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sctx, dev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);

	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);
	scrub_workers_put(root);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}

int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	return __btrfs_scrub_cancel(root->fs_info);
}

int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
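
/*
 * Note on the pause and cancel protocol: btrfs_scrub_pause() raises
 * scrub_pause_req and waits until scrubs_paused has caught up with
 * scrubs_running; the running scrubs perform the matching handshake at
 * the top of the stripe loop in scrub_stripe(). Cancellation works the
 * same way, except that the scrub threads bail out with -ECANCELED
 * instead of waiting for the request to be withdrawn.
 */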