scrub.c revision 442a4f6308e694e0fa6025708bd5e4e424bbf51c
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_dev	*sdev;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_dev	*sdev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};


static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);


static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	/* this can happen when scrub is cancelled */
	if (sdev->curr != -1) {
		struct scrub_bio *sbio = sdev->bios[sdev->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}

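/*
 * allocate and initialize the per-device scrub context: one scrub_bio
 * slot per SCRUB_BIOS_PER_DEV, chained into a free list via next_free
 */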
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	sdev->pages_per_bio = pages_per_bio;
	sdev->curr = -1;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->nodesize = dev->dev_root->nodesize;
	sdev->leafsize = dev->dev_root->leafsize;
	sdev->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->fixup_cnt, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}

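/*
 * callback for iterate_extent_inodes(): resolve one inode that references
 * the errored extent and print a warning line for each path to it
 */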
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;
	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, swarn->dev->name,
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, swarn->dev->name,
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

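/*
 * look up the extent that covers the errored block and print a warning:
 * for tree blocks the backrefs are walked, for data extents every
 * referencing inode is resolved via scrub_print_warning_inode()
 */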
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev = sblock->sdev->dev;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u32 item_size;
	int ret;
	u64 ref_root;
	u8 ref_level;
	unsigned long ptr = 0;
	const int bufsize = 4096;
	u64 extent_item_pos;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	BUG_ON(sblock->page_count < 1);
	swarn.sector = (sblock->pagev[0].physical) >> 9;
	swarn.logical = sblock->pagev[0].logical;
	swarn.errstr = errstr;
	swarn.dev = dev;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical, dev->name,
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

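/*
 * callback for iterate_inodes_from_logical(): try to repair a nodatasum
 * sector by re-reading it through the page cache from the failed mirror,
 * which lets the generic read path rewrite it from a good copy
 */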
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defective sector.
			 * the data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

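/*
 * worker that repairs a data block without checksum: iterate over all
 * inodes referencing the logical address and trigger a re-read of the
 * failed mirror through the page cache (see scrub_fixup_readpage())
 */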
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_dev *sdev;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sdev = fixup->sdev;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.malloc_errors;
		spin_unlock(&sdev->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.uncorrectable_errors;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical, sdev->dev->name);
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see the caller for why we pretend to be paused in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sdev->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sdev->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_dev *sdev = sblock_to_check->sdev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sdev->dev->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0].logical;
	generation = sblock_to_check->pagev[0].generation;
	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0].flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0].have_csum;
	csum = sblock_to_check->pagev[0].csum;

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (which is the reason
	 * this fixup code is called) one more time, page by page this
	 * time, in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror
	 * #2 has an I/O error on the second page, but the first page is
	 * good. Then the first page of the first mirror can be repaired
	 * by taking the first page of the second mirror, and the second
	 * page of the second mirror can be repaired by copying the
	 * contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum. If the latter is the case,
	 * at the end the checksum of the repaired area is verified in
	 * order to correctly maintain the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				     sizeof(*sblocks_for_recheck),
				     GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
				  csum, generation, sdev->csum_size);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sdev->stat_lock);
		sdev->stat.unverified_errors++;
		spin_unlock(&sdev->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.csum_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.verify_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sdev->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sdev = sdev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transaction commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all practical matters. effectively, we only
		 * prevent cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sdev->fixup_cnt);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		if (mirror_index == failed_mirror_index)
			continue;

		/* build and submit the bios, check checksums */
		ret = scrub_recheck_block(fs_info,
					  sblocks_for_recheck + mirror_index,
					  is_metadata, have_csum, csum,
					  generation, sdev->csum_size);
		if (ret)
			goto did_not_correct_error;
	}

	/*
	 * first try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other = sblocks_for_recheck +
						   mirror_index;

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, which states whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev + page_num;

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev +
							page_num;

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			ret = scrub_recheck_block(fs_info, sblock_bad,
						  is_metadata, have_csum, csum,
						  generation, sdev->csum_size);
			if (!ret && !sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sdev->stat_lock);
			sdev->stat.corrected_errors++;
			spin_unlock(&sdev->stat_lock);
			printk_ratelimited(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical, sdev->dev->name);
		}
	} else {
did_not_correct_error:
		spin_lock(&sdev->stat_lock);
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical, sdev->dev->name);
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
			     page_index++)
				if (sblock->pagev[page_index].page)
					__free_page(
						sblock->pagev[page_index].page);
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

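/*
 * map the logical range once per page and allocate one scrub_block with
 * backing pages for every mirror, so that each mirror can be re-read and
 * compared page by page
 */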
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the three members sdev, ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			page = sblock->pagev + page_index;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page) {
				spin_lock(&sdev->stat_lock);
				sdev->stat.malloc_errors++;
				spin_unlock(&sdev->stat_lock);
				return -ENOMEM;
			}
			sblock->page_count++;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		int ret;
		struct scrub_page *page = sblock->pagev + page_num;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		BUG_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return 0;
}

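/*
 * verify a block that was re-read without I/O errors: for metadata,
 * validate the header fields (bytenr, fsid, chunk tree uuid, generation)
 * and checksum the whole block; for data, verify against the given csum
 */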
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	BUG_ON(!sblock->pagev[0].page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != le64_to_cpu(h->generation)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		BUG_ON(!sblock->pagev[page_num].page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

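/*
 * repair a bad block from a good mirror, page by page. with force_write,
 * every page is rewritten; otherwise a page is only rewritten if the bad
 * block has a header/checksum error or the page itself saw an I/O error
 */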
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
	struct scrub_page *page_good = sblock_good->pagev + page_num;

	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
	BUG_ON(sblock_good->pagev[page_num].page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

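/*
 * dispatch the checksum verification based on the extent flags of the
 * first page; a failure hands the block over to the repair machinery
 */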
static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	BUG_ON(sblock->page_count < 1);
	flags = sblock->pagev[0].flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}

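/*
 * verify one data sector against the checksum collected from the csum
 * tree; returns 1 on mismatch, 0 if the csum matches or none exists
 */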
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0].have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0].csum;
	page = sblock->pagev[0].page;
	buffer = kmap_atomic(page);

	len = sdev->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(root, buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sdev->csum_size))
		fail = 1;

	return fail;
}

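/*
 * verify a tree block: compare bytenr, generation, fsid and chunk tree
 * uuid from the header against the expected values, then checksum the
 * whole node; returns nonzero if any of the checks failed
 */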
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sdev->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
		++fail;

	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	BUG_ON(sdev->nodesize != sdev->leafsize);
	len = sdev->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

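/*
 * verify a super block copy: check bytenr, generation and fsid plus the
 * checksum. errors are only counted here, not repaired, since the super
 * blocks get rewritten with the next transaction commit anyway
 */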
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sdev->csum_size);

	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
		++fail_cor;

	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			if (sblock->pagev[i].page)
				__free_page(sblock->pagev[i].page);
		kfree(sblock);
	}
}

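/*
 * submit the currently filled bio and mark the slot as in flight; the
 * bio is returned to the free list in scrub_bio_end_io_worker()
 */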
static void scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;

	if (sdev->curr == -1)
		return;

	sbio = sdev->bios[sdev->curr];
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	btrfsic_submit_bio(READ, sbio->bio);
}

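/*
 * queue a page for reading: append it to the current bio if it is
 * physically and logically contiguous, otherwise submit the current bio
 * and start a new one (waiting for a free slot if necessary)
 */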
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->page_count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = spage->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_submit(sdev);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sdev);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sdev->pages_per_bio)
		scrub_submit(sdev);

	return 0;
}

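/*
 * create a scrub_block for the given range, allocate and fill one
 * scrub_page per PAGE_SIZE and queue all pages for reading; with force,
 * the current bio is submitted right away
 */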
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		spin_unlock(&sdev->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sdev = sdev;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		u64 l = min_t(u64, len, PAGE_SIZE);

		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page) {
			spin_lock(&sdev->stat_lock);
			sdev->stat.malloc_errors++;
			spin_unlock(&sdev->stat_lock);
			while (index > 0) {
				index--;
				__free_page(sblock->pagev[index].page);
			}
			kfree(sblock);
			return -ENOMEM;
		}
		spage->sblock = sblock;
		spage->dev = sdev->dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sdev->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		len -= l;
		logical += l;
		physical += l;
	}

	BUG_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		int ret;

		ret = scrub_add_page_to_bio(sdev, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sdev);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

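/*
 * worker context for bio completion: mark pages with I/O errors, complete
 * the scrub_blocks whose last page finished, then recycle the scrub_bio
 * slot onto the free list
 */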
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	if (sbio->err) {
		/* what is this good for??? */
		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->page_count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}

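/*
 * look up the checksum for a logical address in the list that was
 * collected from the csum tree; consumed entries are dropped from the
 * list. returns 1 if a csum was copied out, 0 otherwise
 */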
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sdev->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sdev->sectorsize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.data_extents_scrubbed++;
		sdev->stat.data_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sdev->nodesize != sdev->leafsize);
		blocksize = sdev->nodesize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.tree_extents_scrubbed++;
		sdev->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else {
		blocksize = sdev->sectorsize;
		BUG_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

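/*
 * scrub one stripe of a chunk on the given device: compute the stripe
 * geometry for the RAID level, read ahead the extent and csum trees,
 * then walk all extent items of the stripe and scrub them
 */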
1782static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1783	struct map_lookup *map, int num, u64 base, u64 length)
1784{
1785	struct btrfs_path *path;
1786	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1787	struct btrfs_root *root = fs_info->extent_root;
1788	struct btrfs_root *csum_root = fs_info->csum_root;
1789	struct btrfs_extent_item *extent;
1790	struct blk_plug plug;
1791	u64 flags;
1792	int ret;
1793	int slot;
1794	int i;
1795	u64 nstripes;
1796	struct extent_buffer *l;
1797	struct btrfs_key key;
1798	u64 physical;
1799	u64 logical;
1800	u64 generation;
1801	int mirror_num;
1802	struct reada_control *reada1;
1803	struct reada_control *reada2;
1804	struct btrfs_key key_start;
1805	struct btrfs_key key_end;
1806
1807	u64 increment = map->stripe_len;
1808	u64 offset;
1809
1810	nstripes = length;
1811	offset = 0;
1812	do_div(nstripes, map->stripe_len);
1813	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1814		offset = map->stripe_len * num;
1815		increment = map->stripe_len * map->num_stripes;
1816		mirror_num = 1;
1817	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1818		int factor = map->num_stripes / map->sub_stripes;
1819		offset = map->stripe_len * (num / map->sub_stripes);
1820		increment = map->stripe_len * factor;
1821		mirror_num = num % map->sub_stripes + 1;
1822	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1823		increment = map->stripe_len;
1824		mirror_num = num % map->num_stripes + 1;
1825	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1826		increment = map->stripe_len;
1827		mirror_num = num % map->num_stripes + 1;
1828	} else {
1829		increment = map->stripe_len;
1830		mirror_num = 1;
1831	}
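	/*
	 * Sketch of the RAID10 math above with illustrative numbers:
	 * num_stripes = 4 and sub_stripes = 2 give factor = 2. For
	 * stripe num = 3: offset = stripe_len * (3 / 2) = stripe_len,
	 * so this device starts at the second logical stripe;
	 * increment = 2 * stripe_len, since each logical stripe
	 * consumes one mirror pair; mirror_num = 3 % 2 + 1 = 2, the
	 * second copy.
	 */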
1832
1833	path = btrfs_alloc_path();
1834	if (!path)
1835		return -ENOMEM;
1836
1837	/*
1838	 * work on the commit root. The related disk blocks are static as
1839	 * long as COW is applied. This means it is safe to rewrite
1840	 * them to repair disk errors without any race conditions.
1841	 */
1842	path->search_commit_root = 1;
1843	path->skip_locking = 1;
1844
1845	/*
1846	 * trigger the readahead for the extent tree and the csum tree
1847	 * and wait for completion. During readahead, the scrub is
1848	 * officially paused so as not to hold off transaction commits.
1849	 */
1850	logical = base + offset;
1851
1852	wait_event(sdev->list_wait,
1853		   atomic_read(&sdev->in_flight) == 0);
1854	atomic_inc(&fs_info->scrubs_paused);
1855	wake_up(&fs_info->scrub_pause_wait);
1856
1857	/* FIXME it might be better to start readahead at commit root */
1858	key_start.objectid = logical;
1859	key_start.type = BTRFS_EXTENT_ITEM_KEY;
1860	key_start.offset = (u64)0;
1861	key_end.objectid = base + offset + nstripes * increment;
1862	key_end.type = BTRFS_EXTENT_ITEM_KEY;
1863	key_end.offset = (u64)0;
1864	reada1 = btrfs_reada_add(root, &key_start, &key_end);
1865
1866	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1867	key_start.type = BTRFS_EXTENT_CSUM_KEY;
1868	key_start.offset = logical;
1869	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1870	key_end.type = BTRFS_EXTENT_CSUM_KEY;
1871	key_end.offset = base + offset + nstripes * increment;
1872	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
1873
1874	if (!IS_ERR(reada1))
1875		btrfs_reada_wait(reada1);
1876	if (!IS_ERR(reada2))
1877		btrfs_reada_wait(reada2);
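	/*
	 * Note on the two ranges above: reada1 prefetches the extent
	 * items for the logical span this device covers, [logical,
	 * base + offset + nstripes * increment), while reada2
	 * prefetches the matching checksum items, which all live under
	 * the objectid BTRFS_EXTENT_CSUM_OBJECTID keyed by logical
	 * offset.
	 */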
1878
1879	mutex_lock(&fs_info->scrub_lock);
1880	while (atomic_read(&fs_info->scrub_pause_req)) {
1881		mutex_unlock(&fs_info->scrub_lock);
1882		wait_event(fs_info->scrub_pause_wait,
1883		   atomic_read(&fs_info->scrub_pause_req) == 0);
1884		mutex_lock(&fs_info->scrub_lock);
1885	}
1886	atomic_dec(&fs_info->scrubs_paused);
1887	mutex_unlock(&fs_info->scrub_lock);
1888	wake_up(&fs_info->scrub_pause_wait);
1889
1890	/*
1891	 * collect all data csums for the stripe to avoid seeking during
1892	 * the scrub. With crc32, this may currently amount to about 1 MB.
1893	 */
1894	blk_start_plug(&plug);
1895
1896	/*
1897	 * now find all extents for each stripe and scrub them
1898	 */
1899	logical = base + offset;
1900	physical = map->stripes[num].physical;
1901	ret = 0;
1902	for (i = 0; i < nstripes; ++i) {
1903		/*
1904		 * canceled?
1905		 */
1906		if (atomic_read(&fs_info->scrub_cancel_req) ||
1907		    atomic_read(&sdev->cancel_req)) {
1908			ret = -ECANCELED;
1909			goto out;
1910		}
1911		/*
1912		 * check to see if we have to pause
1913		 */
1914		if (atomic_read(&fs_info->scrub_pause_req)) {
1915			/* push queued extents */
1916			scrub_submit(sdev);
1917			wait_event(sdev->list_wait,
1918				   atomic_read(&sdev->in_flight) == 0);
1919			atomic_inc(&fs_info->scrubs_paused);
1920			wake_up(&fs_info->scrub_pause_wait);
1921			mutex_lock(&fs_info->scrub_lock);
1922			while (atomic_read(&fs_info->scrub_pause_req)) {
1923				mutex_unlock(&fs_info->scrub_lock);
1924				wait_event(fs_info->scrub_pause_wait,
1925				   atomic_read(&fs_info->scrub_pause_req) == 0);
1926				mutex_lock(&fs_info->scrub_lock);
1927			}
1928			atomic_dec(&fs_info->scrubs_paused);
1929			mutex_unlock(&fs_info->scrub_lock);
1930			wake_up(&fs_info->scrub_pause_wait);
1931		}
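		/*
		 * The handshake above mirrors btrfs_scrub_pause():
		 * once the in-flight bios have drained, scrubs_paused
		 * is bumped so the pausing side (which waits for
		 * scrubs_paused == scrubs_running) can proceed, and we
		 * block until scrub_pause_req drops back to zero.
		 */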
1932
1933		ret = btrfs_lookup_csums_range(csum_root, logical,
1934					       logical + map->stripe_len - 1,
1935					       &sdev->csum_list, 1);
1936		if (ret)
1937			goto out;
1938
1939		key.objectid = logical;
1940		key.type = BTRFS_EXTENT_ITEM_KEY;
1941		key.offset = (u64)0;
1942
1943		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1944		if (ret < 0)
1945			goto out;
1946		if (ret > 0) {
1947			ret = btrfs_previous_item(root, path, 0,
1948						  BTRFS_EXTENT_ITEM_KEY);
1949			if (ret < 0)
1950				goto out;
1951			if (ret > 0) {
1952				/* there's no smaller item, so stick with the
1953				 * larger one */
1954				btrfs_release_path(path);
1955				ret = btrfs_search_slot(NULL, root, &key,
1956							path, 0, 0);
1957				if (ret < 0)
1958					goto out;
1959			}
1960		}
1961
1962		while (1) {
1963			l = path->nodes[0];
1964			slot = path->slots[0];
1965			if (slot >= btrfs_header_nritems(l)) {
1966				ret = btrfs_next_leaf(root, path);
1967				if (ret == 0)
1968					continue;
1969				if (ret < 0)
1970					goto out;
1971
1972				break;
1973			}
1974			btrfs_item_key_to_cpu(l, &key, slot);
1975
1976			if (key.objectid + key.offset <= logical)
1977				goto next;
1978
1979			if (key.objectid >= logical + map->stripe_len)
1980				break;
1981
1982			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1983				goto next;
1984
1985			extent = btrfs_item_ptr(l, slot,
1986						struct btrfs_extent_item);
1987			flags = btrfs_extent_flags(l, extent);
1988			generation = btrfs_extent_generation(l, extent);
1989
1990			if (key.objectid < logical &&
1991			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1992				printk(KERN_ERR
1993				       "btrfs scrub: tree block %llu spanning "
1994				       "stripes, ignored. logical=%llu\n",
1995				       (unsigned long long)key.objectid,
1996				       (unsigned long long)logical);
1997				goto next;
1998			}
1999
2000			/*
2001			 * trim extent to this stripe
2002			 */
2003			if (key.objectid < logical) {
2004				key.offset -= logical - key.objectid;
2005				key.objectid = logical;
2006			}
2007			if (key.objectid + key.offset >
2008			    logical + map->stripe_len) {
2009				key.offset = logical + map->stripe_len -
2010					     key.objectid;
2011			}
2012
2013			ret = scrub_extent(sdev, key.objectid, key.offset,
2014					   key.objectid - logical + physical,
2015					   flags, generation, mirror_num);
2016			if (ret)
2017				goto out;
2018
2019next:
2020			path->slots[0]++;
2021		}
2022		btrfs_release_path(path);
2023		logical += increment;
2024		physical += map->stripe_len;
2025		spin_lock(&sdev->stat_lock);
2026		sdev->stat.last_physical = physical;
2027		spin_unlock(&sdev->stat_lock);
2028	}
2029	/* push queued extents */
2030	scrub_submit(sdev);
2031
2032out:
2033	blk_finish_plug(&plug);
2034	btrfs_free_path(path);
2035	return ret < 0 ? ret : 0;
2036}
2037
2038static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
2039	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
2040	u64 dev_offset)
2041{
2042	struct btrfs_mapping_tree *map_tree =
2043		&sdev->dev->dev_root->fs_info->mapping_tree;
2044	struct map_lookup *map;
2045	struct extent_map *em;
2046	int i;
2047	int ret = -EINVAL;
2048
2049	read_lock(&map_tree->map_tree.lock);
2050	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2051	read_unlock(&map_tree->map_tree.lock);
2052
2053	if (!em)
2054		return -EINVAL;
2055
2056	map = (struct map_lookup *)em->bdev;
2057	if (em->start != chunk_offset)
2058		goto out;
2059
2060	if (em->len < length)
2061		goto out;
2062
2063	for (i = 0; i < map->num_stripes; ++i) {
2064		if (map->stripes[i].dev == sdev->dev &&
2065		    map->stripes[i].physical == dev_offset) {
2066			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
2067			if (ret)
2068				goto out;
2069		}
2070	}
2071out:
2072	free_extent_map(em);
2073
2074	return ret;
2075}
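
/*
 * Note that the stripe match above compares both the device and the
 * physical offset: for a DUP chunk, both stripes sit on the same
 * device at different offsets, and each dev extent must scrub only
 * the copy it actually describes.
 */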
2076
2077static noinline_for_stack
2078int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
2079{
2080	struct btrfs_dev_extent *dev_extent = NULL;
2081	struct btrfs_path *path;
2082	struct btrfs_root *root = sdev->dev->dev_root;
2083	struct btrfs_fs_info *fs_info = root->fs_info;
2084	u64 length;
2085	u64 chunk_tree;
2086	u64 chunk_objectid;
2087	u64 chunk_offset;
2088	int ret;
2089	int slot;
2090	struct extent_buffer *l;
2091	struct btrfs_key key;
2092	struct btrfs_key found_key;
2093	struct btrfs_block_group_cache *cache;
2094
2095	path = btrfs_alloc_path();
2096	if (!path)
2097		return -ENOMEM;
2098
2099	path->reada = 2;
2100	path->search_commit_root = 1;
2101	path->skip_locking = 1;
2102
2103	key.objectid = sdev->dev->devid;
2104	key.offset = 0ull;
2105	key.type = BTRFS_DEV_EXTENT_KEY;
2106
2107
2108	while (1) {
2109		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2110		if (ret < 0)
2111			break;
2112		if (ret > 0) {
2113			if (path->slots[0] >=
2114			    btrfs_header_nritems(path->nodes[0])) {
2115				ret = btrfs_next_leaf(root, path);
2116				if (ret)
2117					break;
2118			}
2119		}
2120
2121		l = path->nodes[0];
2122		slot = path->slots[0];
2123
2124		btrfs_item_key_to_cpu(l, &found_key, slot);
2125
2126		if (found_key.objectid != sdev->dev->devid)
2127			break;
2128
2129		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2130			break;
2131
2132		if (found_key.offset >= end)
2133			break;
2134
2135		if (found_key.offset < key.offset)
2136			break;
2137
2138		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2139		length = btrfs_dev_extent_length(l, dev_extent);
2140
2141		if (found_key.offset + length <= start) {
2142			key.offset = found_key.offset + length;
2143			btrfs_release_path(path);
2144			continue;
2145		}
2146
2147		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2148		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2149		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2150
2151		/*
2152		 * get a reference on the corresponding block group to prevent
2153		 * the chunk from going away while we scrub it
2154		 */
2155		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2156		if (!cache) {
2157			ret = -ENOENT;
2158			break;
2159		}
2160		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
2161				  chunk_offset, length, found_key.offset);
2162		btrfs_put_block_group(cache);
2163		if (ret)
2164			break;
2165
2166		key.offset = found_key.offset + length;
2167		btrfs_release_path(path);
2168	}
2169
2170	btrfs_free_path(path);
2171
2172	/*
2173	 * ret can still be 1 from search_slot or next_leaf;
2174	 * that is not an error
2175	 */
2176	return ret < 0 ? ret : 0;
2177}
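
/*
 * Illustrative walk of the loop above: with 1 GiB dev extents at
 * device offsets 0, 1G and 2G, a scrub of [start = 1G, end = 3G)
 * skips the first extent via the "offset + length <= start" check,
 * scrubs the chunks behind the second and third, and stops once the
 * search key moves past the last dev extent of this device.
 */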
2178
2179static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2180{
2181	int	i;
2182	u64	bytenr;
2183	u64	gen;
2184	int	ret;
2185	struct btrfs_device *device = sdev->dev;
2186	struct btrfs_root *root = device->dev_root;
2187
2188	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2189		return -EIO;
2190
2191	gen = root->fs_info->last_trans_committed;
2192
2193	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2194		bytenr = btrfs_sb_offset(i);
2195		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
2196			break;
2197
2198		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2199				     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
2200		if (ret)
2201			return ret;
2202	}
2203	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2204
2205	return 0;
2206}
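
/*
 * For reference: btrfs keeps up to BTRFS_SUPER_MIRROR_MAX (3) copies
 * of the superblock at fixed offsets of 64 KiB, 64 MiB and 256 GiB;
 * the total_bytes check above skips copies that would lie beyond the
 * end of a small device.
 */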
2207
2208/*
2209 * get a reference count on fs_info->scrub_workers and start the workers if necessary
2210 */
2211static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2212{
2213	struct btrfs_fs_info *fs_info = root->fs_info;
2214	int ret = 0;
2215
2216	mutex_lock(&fs_info->scrub_lock);
2217	if (fs_info->scrub_workers_refcnt == 0) {
2218		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2219			   fs_info->thread_pool_size, &fs_info->generic_worker);
2220		fs_info->scrub_workers.idle_thresh = 4;
2221		ret = btrfs_start_workers(&fs_info->scrub_workers);
2222		if (ret)
2223			goto out;
2224	}
2225	++fs_info->scrub_workers_refcnt;
2226out:
2227	mutex_unlock(&fs_info->scrub_lock);
2228
2229	return ret;
2230}
2231
2232static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2233{
2234	struct btrfs_fs_info *fs_info = root->fs_info;
2235
2236	mutex_lock(&fs_info->scrub_lock);
2237	if (--fs_info->scrub_workers_refcnt == 0)
2238		btrfs_stop_workers(&fs_info->scrub_workers);
2239	WARN_ON(fs_info->scrub_workers_refcnt < 0);
2240	mutex_unlock(&fs_info->scrub_lock);
2241}
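
/*
 * Every successful scrub_workers_get() must be paired with a
 * scrub_workers_put(); btrfs_scrub_dev() below does this on each of
 * its error paths, so the worker pool is torn down when the last
 * scrub finishes.
 */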
2242
2243
2244int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2245		    struct btrfs_scrub_progress *progress, int readonly)
2246{
2247	struct scrub_dev *sdev;
2248	struct btrfs_fs_info *fs_info = root->fs_info;
2249	int ret;
2250	struct btrfs_device *dev;
2251
2252	if (btrfs_fs_closing(root->fs_info))
2253		return -EINVAL;
2254
2255	/*
2256	 * check some assumptions
2257	 */
2258	if (root->nodesize != root->leafsize) {
2259		printk(KERN_ERR
2260		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2261		       root->nodesize, root->leafsize);
2262		return -EINVAL;
2263	}
2264
2265	if (root->nodesize > BTRFS_STRIPE_LEN) {
2266		/*
2267		 * in this case scrub is unable to calculate the checksum
2268		 * the way it is currently implemented. Do not handle this
2269		 * situation at all, as it cannot occur in practice.
2270		 */
2271		printk(KERN_ERR
2272		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2273		       root->nodesize, BTRFS_STRIPE_LEN);
2274		return -EINVAL;
2275	}
2276
2277	if (root->sectorsize != PAGE_SIZE) {
2278		/* not supported for data w/o checksums */
2279		printk(KERN_ERR
2280		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
2281		       root->sectorsize, (unsigned long long)PAGE_SIZE);
2282		return -EINVAL;
2283	}
2284
2285	ret = scrub_workers_get(root);
2286	if (ret)
2287		return ret;
2288
2289	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2290	dev = btrfs_find_device(root, devid, NULL, NULL);
2291	if (!dev || dev->missing) {
2292		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2293		scrub_workers_put(root);
2294		return -ENODEV;
2295	}
2296	mutex_lock(&fs_info->scrub_lock);
2297
2298	if (!dev->in_fs_metadata) {
2299		mutex_unlock(&fs_info->scrub_lock);
2300		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2301		scrub_workers_put(root);
2302		return -ENODEV;
2303	}
2304
2305	if (dev->scrub_device) {
2306		mutex_unlock(&fs_info->scrub_lock);
2307		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2308		scrub_workers_put(root);
2309		return -EINPROGRESS;
2310	}
2311	sdev = scrub_setup_dev(dev);
2312	if (IS_ERR(sdev)) {
2313		mutex_unlock(&fs_info->scrub_lock);
2314		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2315		scrub_workers_put(root);
2316		return PTR_ERR(sdev);
2317	}
2318	sdev->readonly = readonly;
2319	dev->scrub_device = sdev;
2320
2321	atomic_inc(&fs_info->scrubs_running);
2322	mutex_unlock(&fs_info->scrub_lock);
2323	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2324
2325	down_read(&fs_info->scrub_super_lock);
2326	ret = scrub_supers(sdev);
2327	up_read(&fs_info->scrub_super_lock);
2328
2329	if (!ret)
2330		ret = scrub_enumerate_chunks(sdev, start, end);
2331
2332	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2333	atomic_dec(&fs_info->scrubs_running);
2334	wake_up(&fs_info->scrub_pause_wait);
2335
2336	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
2337
2338	if (progress)
2339		memcpy(progress, &sdev->stat, sizeof(*progress));
2340
2341	mutex_lock(&fs_info->scrub_lock);
2342	dev->scrub_device = NULL;
2343	mutex_unlock(&fs_info->scrub_lock);
2344
2345	scrub_free_dev(sdev);
2346	scrub_workers_put(root);
2347
2348	return ret;
2349}
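
/*
 * btrfs_scrub_dev() above is the entry point used by the
 * BTRFS_IOC_SCRUB ioctl. With readonly set, the run is a pure check:
 * errors are counted in the progress stats but nothing is rewritten.
 */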
2350
2351void btrfs_scrub_pause(struct btrfs_root *root)
2352{
2353	struct btrfs_fs_info *fs_info = root->fs_info;
2354
2355	mutex_lock(&fs_info->scrub_lock);
2356	atomic_inc(&fs_info->scrub_pause_req);
2357	while (atomic_read(&fs_info->scrubs_paused) !=
2358	       atomic_read(&fs_info->scrubs_running)) {
2359		mutex_unlock(&fs_info->scrub_lock);
2360		wait_event(fs_info->scrub_pause_wait,
2361			   atomic_read(&fs_info->scrubs_paused) ==
2362			   atomic_read(&fs_info->scrubs_running));
2363		mutex_lock(&fs_info->scrub_lock);
2364	}
2365	mutex_unlock(&fs_info->scrub_lock);
2366}
2367
2368void btrfs_scrub_continue(struct btrfs_root *root)
2369{
2370	struct btrfs_fs_info *fs_info = root->fs_info;
2371
2372	atomic_dec(&fs_info->scrub_pause_req);
2373	wake_up(&fs_info->scrub_pause_wait);
2374}
2375
2376void btrfs_scrub_pause_super(struct btrfs_root *root)
2377{
2378	down_write(&root->fs_info->scrub_super_lock);
2379}
2380
2381void btrfs_scrub_continue_super(struct btrfs_root *root)
2382{
2383	up_write(&root->fs_info->scrub_super_lock);
2384}
2385
2386int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2387{
2388
2389	mutex_lock(&fs_info->scrub_lock);
2390	if (!atomic_read(&fs_info->scrubs_running)) {
2391		mutex_unlock(&fs_info->scrub_lock);
2392		return -ENOTCONN;
2393	}
2394
2395	atomic_inc(&fs_info->scrub_cancel_req);
2396	while (atomic_read(&fs_info->scrubs_running)) {
2397		mutex_unlock(&fs_info->scrub_lock);
2398		wait_event(fs_info->scrub_pause_wait,
2399			   atomic_read(&fs_info->scrubs_running) == 0);
2400		mutex_lock(&fs_info->scrub_lock);
2401	}
2402	atomic_dec(&fs_info->scrub_cancel_req);
2403	mutex_unlock(&fs_info->scrub_lock);
2404
2405	return 0;
2406}
2407
2408int btrfs_scrub_cancel(struct btrfs_root *root)
2409{
2410	return __btrfs_scrub_cancel(root->fs_info);
2411}
2412
2413int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2414{
2415	struct btrfs_fs_info *fs_info = root->fs_info;
2416	struct scrub_dev *sdev;
2417
2418	mutex_lock(&fs_info->scrub_lock);
2419	sdev = dev->scrub_device;
2420	if (!sdev) {
2421		mutex_unlock(&fs_info->scrub_lock);
2422		return -ENOTCONN;
2423	}
2424	atomic_inc(&sdev->cancel_req);
2425	while (dev->scrub_device) {
2426		mutex_unlock(&fs_info->scrub_lock);
2427		wait_event(fs_info->scrub_pause_wait,
2428			   dev->scrub_device == NULL);
2429		mutex_lock(&fs_info->scrub_lock);
2430	}
2431	mutex_unlock(&fs_info->scrub_lock);
2432
2433	return 0;
2434}
2435
2436int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2437{
2438	struct btrfs_fs_info *fs_info = root->fs_info;
2439	struct btrfs_device *dev;
2440	int ret;
2441
2442	/*
2443	 * we have to hold the device_list_mutex here so the device
2444	 * does not go away in cancel_dev. FIXME: find a better solution
2445	 */
2446	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2447	dev = btrfs_find_device(root, devid, NULL, NULL);
2448	if (!dev) {
2449		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2450		return -ENODEV;
2451	}
2452	ret = btrfs_scrub_cancel_dev(root, dev);
2453	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2454
2455	return ret;
2456}
2457
2458int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2459			 struct btrfs_scrub_progress *progress)
2460{
2461	struct btrfs_device *dev;
2462	struct scrub_dev *sdev = NULL;
2463
2464	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2465	dev = btrfs_find_device(root, devid, NULL, NULL);
2466	if (dev)
2467		sdev = dev->scrub_device;
2468	if (sdev)
2469		memcpy(progress, &sdev->stat, sizeof(*progress));
2470	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2471
2472	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
2473}
2474