raid5.c revision 35f2a591192d0a5d9f7fc696869c76f0b8e49c3d
1/* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21/* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46#include <linux/blkdev.h> 47#include <linux/kthread.h> 48#include <linux/raid/pq.h> 49#include <linux/async_tx.h> 50#include <linux/async.h> 51#include <linux/seq_file.h> 52#include <linux/cpu.h> 53#include <linux/slab.h> 54#include "md.h" 55#include "raid5.h" 56#include "bitmap.h" 57 58/* 59 * Stripe cache 60 */ 61 62#define NR_STRIPES 256 63#define STRIPE_SIZE PAGE_SIZE 64#define STRIPE_SHIFT (PAGE_SHIFT - 9) 65#define STRIPE_SECTORS (STRIPE_SIZE>>9) 66#define IO_THRESHOLD 1 67#define BYPASS_THRESHOLD 1 68#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 69#define HASH_MASK (NR_HASH - 1) 70 71#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 72 73/* bio's attached to a stripe+device for I/O are linked together in bi_sector 74 * order without overlap. There may be several bio's per stripe+device, and 75 * a bio could span several devices. 76 * When walking this list for a particular stripe+device, we must never proceed 77 * beyond a bio that extends past this device, as the next bio might no longer 78 * be valid. 79 * This macro is used to determine the 'next' bio in the list, given the sector 80 * of the current stripe+device 81 */ 82#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL) 83/* 84 * The following can be used to debug the driver 85 */ 86#define RAID5_PARANOIA 1 87#if RAID5_PARANOIA && defined(CONFIG_SMP) 88# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 89#else 90# define CHECK_DEVLOCK() 91#endif 92 93#ifdef DEBUG 94#define inline 95#define __inline__ 96#endif 97 98#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args))) 99 100/* 101 * We maintain a biased count of active stripes in the bottom 16 bits of 102 * bi_phys_segments, and a count of processed stripes in the upper 16 bits 103 */ 104static inline int raid5_bi_phys_segments(struct bio *bio) 105{ 106 return bio->bi_phys_segments & 0xffff; 107} 108 109static inline int raid5_bi_hw_segments(struct bio *bio) 110{ 111 return (bio->bi_phys_segments >> 16) & 0xffff; 112} 113 114static inline int raid5_dec_bi_phys_segments(struct bio *bio) 115{ 116 --bio->bi_phys_segments; 117 return raid5_bi_phys_segments(bio); 118} 119 120static inline int raid5_dec_bi_hw_segments(struct bio *bio) 121{ 122 unsigned short val = raid5_bi_hw_segments(bio); 123 124 --val; 125 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); 126 return val; 127} 128 129static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 130{ 131 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); 132} 133 134/* Find first data disk in a raid6 stripe */ 135static inline int raid6_d0(struct stripe_head *sh) 136{ 137 if (sh->ddf_layout) 138 /* ddf always starts from first device */ 139 return 0; 140 /* md starts just after Q block */ 141 if (sh->qd_idx == sh->disks - 1) 142 return 0; 143 else 144 return sh->qd_idx + 1; 145} 146static inline int raid6_next_disk(int disk, int raid_disks) 147{ 148 disk++; 149 return (disk < raid_disks) ? disk : 0; 150} 151 152/* When walking through the disks in a raid5, starting at raid6_d0, 153 * we need to map each disk to a 'slot', where the data disks are slot 154 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk 155 * is raid_disks-1. This helper does that mapping.
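 *
 * Worked example (editor's illustration, geometry assumed): with 6 devices
 * in the normal md layout and pd_idx == 2, qd_idx == 3, raid6_d0() returns
 * qd_idx + 1 == 4; walking devices 4, 5, 0, 1 through raid6_idx_to_slot()
 * yields data slots 0, 1, 2, 3, while device 2 (P) maps to slot
 * syndrome_disks == 4 and device 3 (Q) to slot 5.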
156 */ 157static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 158 int *count, int syndrome_disks) 159{ 160 int slot = *count; 161 162 if (sh->ddf_layout) 163 (*count)++; 164 if (idx == sh->pd_idx) 165 return syndrome_disks; 166 if (idx == sh->qd_idx) 167 return syndrome_disks + 1; 168 if (!sh->ddf_layout) 169 (*count)++; 170 return slot; 171} 172 173static void return_io(struct bio *return_bi) 174{ 175 struct bio *bi = return_bi; 176 while (bi) { 177 178 return_bi = bi->bi_next; 179 bi->bi_next = NULL; 180 bi->bi_size = 0; 181 bio_endio(bi, 0); 182 bi = return_bi; 183 } 184} 185 186static void print_raid5_conf (raid5_conf_t *conf); 187 188static int stripe_operations_active(struct stripe_head *sh) 189{ 190 return sh->check_state || sh->reconstruct_state || 191 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 192 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 193} 194 195static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 196{ 197 if (atomic_dec_and_test(&sh->count)) { 198 BUG_ON(!list_empty(&sh->lru)); 199 BUG_ON(atomic_read(&conf->active_stripes)==0); 200 if (test_bit(STRIPE_HANDLE, &sh->state)) { 201 if (test_bit(STRIPE_DELAYED, &sh->state)) { 202 list_add_tail(&sh->lru, &conf->delayed_list); 203 blk_plug_device(conf->mddev->queue); 204 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 205 sh->bm_seq - conf->seq_write > 0) { 206 list_add_tail(&sh->lru, &conf->bitmap_list); 207 blk_plug_device(conf->mddev->queue); 208 } else { 209 clear_bit(STRIPE_BIT_DELAY, &sh->state); 210 list_add_tail(&sh->lru, &conf->handle_list); 211 } 212 md_wakeup_thread(conf->mddev->thread); 213 } else { 214 BUG_ON(stripe_operations_active(sh)); 215 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 216 atomic_dec(&conf->preread_active_stripes); 217 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 218 md_wakeup_thread(conf->mddev->thread); 219 } 220 atomic_dec(&conf->active_stripes); 221 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 222 list_add_tail(&sh->lru, &conf->inactive_list); 223 wake_up(&conf->wait_for_stripe); 224 if (conf->retry_read_aligned) 225 md_wakeup_thread(conf->mddev->thread); 226 } 227 } 228 } 229} 230 231static void release_stripe(struct stripe_head *sh) 232{ 233 raid5_conf_t *conf = sh->raid_conf; 234 unsigned long flags; 235 236 spin_lock_irqsave(&conf->device_lock, flags); 237 __release_stripe(conf, sh); 238 spin_unlock_irqrestore(&conf->device_lock, flags); 239} 240 241static inline void remove_hash(struct stripe_head *sh) 242{ 243 pr_debug("remove_hash(), stripe %llu\n", 244 (unsigned long long)sh->sector); 245 246 hlist_del_init(&sh->hash); 247} 248 249static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 250{ 251 struct hlist_head *hp = stripe_hash(conf, sh->sector); 252 253 pr_debug("insert_hash(), stripe %llu\n", 254 (unsigned long long)sh->sector); 255 256 CHECK_DEVLOCK(); 257 hlist_add_head(&sh->hash, hp); 258} 259 260 261/* find an idle stripe, make sure it is unhashed, and return it. 
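 * The caller must already hold conf->device_lock; the CHECK_DEVLOCK()
 * below asserts as much on SMP builds (editor's note).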
*/ 262static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 263{ 264 struct stripe_head *sh = NULL; 265 struct list_head *first; 266 267 CHECK_DEVLOCK(); 268 if (list_empty(&conf->inactive_list)) 269 goto out; 270 first = conf->inactive_list.next; 271 sh = list_entry(first, struct stripe_head, lru); 272 list_del_init(first); 273 remove_hash(sh); 274 atomic_inc(&conf->active_stripes); 275out: 276 return sh; 277} 278 279static void shrink_buffers(struct stripe_head *sh, int num) 280{ 281 struct page *p; 282 int i; 283 284 for (i=0; i<num ; i++) { 285 p = sh->dev[i].page; 286 if (!p) 287 continue; 288 sh->dev[i].page = NULL; 289 put_page(p); 290 } 291} 292 293static int grow_buffers(struct stripe_head *sh, int num) 294{ 295 int i; 296 297 for (i=0; i<num; i++) { 298 struct page *page; 299 300 if (!(page = alloc_page(GFP_KERNEL))) { 301 return 1; 302 } 303 sh->dev[i].page = page; 304 } 305 return 0; 306} 307 308static void raid5_build_block(struct stripe_head *sh, int i, int previous); 309static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 310 struct stripe_head *sh); 311 312static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 313{ 314 raid5_conf_t *conf = sh->raid_conf; 315 int i; 316 317 BUG_ON(atomic_read(&sh->count) != 0); 318 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 319 BUG_ON(stripe_operations_active(sh)); 320 321 CHECK_DEVLOCK(); 322 pr_debug("init_stripe called, stripe %llu\n", 323 (unsigned long long)sh->sector); 324 325 remove_hash(sh); 326 327 sh->generation = conf->generation - previous; 328 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 329 sh->sector = sector; 330 stripe_set_idx(sector, conf, previous, sh); 331 sh->state = 0; 332 333 334 for (i = sh->disks; i--; ) { 335 struct r5dev *dev = &sh->dev[i]; 336 337 if (dev->toread || dev->read || dev->towrite || dev->written || 338 test_bit(R5_LOCKED, &dev->flags)) { 339 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 340 (unsigned long long)sh->sector, i, dev->toread, 341 dev->read, dev->towrite, dev->written, 342 test_bit(R5_LOCKED, &dev->flags)); 343 BUG(); 344 } 345 dev->flags = 0; 346 raid5_build_block(sh, i, previous); 347 } 348 insert_hash(conf, sh); 349} 350 351static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, 352 short generation) 353{ 354 struct stripe_head *sh; 355 struct hlist_node *hn; 356 357 CHECK_DEVLOCK(); 358 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 359 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 360 if (sh->sector == sector && sh->generation == generation) 361 return sh; 362 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 363 return NULL; 364} 365 366static void unplug_slaves(mddev_t *mddev); 367static void raid5_unplug_device(struct request_queue *q); 368 369static struct stripe_head * 370get_active_stripe(raid5_conf_t *conf, sector_t sector, 371 int previous, int noblock, int noquiesce) 372{ 373 struct stripe_head *sh; 374 375 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 376 377 spin_lock_irq(&conf->device_lock); 378 379 do { 380 wait_event_lock_irq(conf->wait_for_stripe, 381 conf->quiesce == 0 || noquiesce, 382 conf->device_lock, /* nothing */); 383 sh = __find_stripe(conf, sector, conf->generation - previous); 384 if (!sh) { 385 if (!conf->inactive_blocked) 386 sh = get_free_stripe(conf); 387 if (noblock && sh == NULL) 388 break; 389 if (!sh) { 390 conf->inactive_blocked = 1; 391 
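				/*
				 * Editor's note on the wait below: once the
				 * cache is exhausted, inactive_blocked keeps
				 * new callers asleep until usage falls under
				 * 3/4 of max_nr_stripes, so a burst of
				 * requests cannot thrash the stripe cache one
				 * freed stripe at a time.
				 */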
wait_event_lock_irq(conf->wait_for_stripe, 392 !list_empty(&conf->inactive_list) && 393 (atomic_read(&conf->active_stripes) 394 < (conf->max_nr_stripes *3/4) 395 || !conf->inactive_blocked), 396 conf->device_lock, 397 raid5_unplug_device(conf->mddev->queue) 398 ); 399 conf->inactive_blocked = 0; 400 } else 401 init_stripe(sh, sector, previous); 402 } else { 403 if (atomic_read(&sh->count)) { 404 BUG_ON(!list_empty(&sh->lru) 405 && !test_bit(STRIPE_EXPANDING, &sh->state)); 406 } else { 407 if (!test_bit(STRIPE_HANDLE, &sh->state)) 408 atomic_inc(&conf->active_stripes); 409 if (list_empty(&sh->lru) && 410 !test_bit(STRIPE_EXPANDING, &sh->state)) 411 BUG(); 412 list_del_init(&sh->lru); 413 } 414 } 415 } while (sh == NULL); 416 417 if (sh) 418 atomic_inc(&sh->count); 419 420 spin_unlock_irq(&conf->device_lock); 421 return sh; 422} 423 424static void 425raid5_end_read_request(struct bio *bi, int error); 426static void 427raid5_end_write_request(struct bio *bi, int error); 428 429static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 430{ 431 raid5_conf_t *conf = sh->raid_conf; 432 int i, disks = sh->disks; 433 434 might_sleep(); 435 436 for (i = disks; i--; ) { 437 int rw; 438 struct bio *bi; 439 mdk_rdev_t *rdev; 440 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 441 rw = WRITE; 442 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 443 rw = READ; 444 else 445 continue; 446 447 bi = &sh->dev[i].req; 448 449 bi->bi_rw = rw; 450 if (rw == WRITE) 451 bi->bi_end_io = raid5_end_write_request; 452 else 453 bi->bi_end_io = raid5_end_read_request; 454 455 rcu_read_lock(); 456 rdev = rcu_dereference(conf->disks[i].rdev); 457 if (rdev && test_bit(Faulty, &rdev->flags)) 458 rdev = NULL; 459 if (rdev) 460 atomic_inc(&rdev->nr_pending); 461 rcu_read_unlock(); 462 463 if (rdev) { 464 if (s->syncing || s->expanding || s->expanded) 465 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 466 467 set_bit(STRIPE_IO_STARTED, &sh->state); 468 469 bi->bi_bdev = rdev->bdev; 470 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 471 __func__, (unsigned long long)sh->sector, 472 bi->bi_rw, i); 473 atomic_inc(&sh->count); 474 bi->bi_sector = sh->sector + rdev->data_offset; 475 bi->bi_flags = 1 << BIO_UPTODATE; 476 bi->bi_vcnt = 1; 477 bi->bi_max_vecs = 1; 478 bi->bi_idx = 0; 479 bi->bi_io_vec = &sh->dev[i].vec; 480 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 481 bi->bi_io_vec[0].bv_offset = 0; 482 bi->bi_size = STRIPE_SIZE; 483 bi->bi_next = NULL; 484 if (rw == WRITE && 485 test_bit(R5_ReWrite, &sh->dev[i].flags)) 486 atomic_add(STRIPE_SECTORS, 487 &rdev->corrected_errors); 488 generic_make_request(bi); 489 } else { 490 if (rw == WRITE) 491 set_bit(STRIPE_DEGRADED, &sh->state); 492 pr_debug("skip op %ld on disc %d for sector %llu\n", 493 bi->bi_rw, i, (unsigned long long)sh->sector); 494 clear_bit(R5_LOCKED, &sh->dev[i].flags); 495 set_bit(STRIPE_HANDLE, &sh->state); 496 } 497 } 498} 499 500static struct dma_async_tx_descriptor * 501async_copy_data(int frombio, struct bio *bio, struct page *page, 502 sector_t sector, struct dma_async_tx_descriptor *tx) 503{ 504 struct bio_vec *bvl; 505 struct page *bio_page; 506 int i; 507 int page_offset; 508 struct async_submit_ctl submit; 509 enum async_tx_flags flags = 0; 510 511 if (bio->bi_sector >= sector) 512 page_offset = (signed)(bio->bi_sector - sector) * 512; 513 else 514 page_offset = (signed)(sector - bio->bi_sector) * -512; 515 516 if (frombio) 517 flags |= ASYNC_TX_FENCE; 518 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 519 520 
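	/*
	 * Editor's example (STRIPE_SIZE == PAGE_SIZE assumed): a bio
	 * starting two sectors into this device's region gives
	 * page_offset == 1024; one starting two sectors before it gives
	 * page_offset == -1024, and the loop below then skips the first
	 * 1024 bytes of the segment (b_offset) before copying.
	 */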
bio_for_each_segment(bvl, bio, i) { 521 int len = bio_iovec_idx(bio, i)->bv_len; 522 int clen; 523 int b_offset = 0; 524 525 if (page_offset < 0) { 526 b_offset = -page_offset; 527 page_offset += b_offset; 528 len -= b_offset; 529 } 530 531 if (len > 0 && page_offset + len > STRIPE_SIZE) 532 clen = STRIPE_SIZE - page_offset; 533 else 534 clen = len; 535 536 if (clen > 0) { 537 b_offset += bio_iovec_idx(bio, i)->bv_offset; 538 bio_page = bio_iovec_idx(bio, i)->bv_page; 539 if (frombio) 540 tx = async_memcpy(page, bio_page, page_offset, 541 b_offset, clen, &submit); 542 else 543 tx = async_memcpy(bio_page, page, b_offset, 544 page_offset, clen, &submit); 545 } 546 /* chain the operations */ 547 submit.depend_tx = tx; 548 549 if (clen < len) /* hit end of page */ 550 break; 551 page_offset += len; 552 } 553 554 return tx; 555} 556 557static void ops_complete_biofill(void *stripe_head_ref) 558{ 559 struct stripe_head *sh = stripe_head_ref; 560 struct bio *return_bi = NULL; 561 raid5_conf_t *conf = sh->raid_conf; 562 int i; 563 564 pr_debug("%s: stripe %llu\n", __func__, 565 (unsigned long long)sh->sector); 566 567 /* clear completed biofills */ 568 spin_lock_irq(&conf->device_lock); 569 for (i = sh->disks; i--; ) { 570 struct r5dev *dev = &sh->dev[i]; 571 572 /* acknowledge completion of a biofill operation */ 573 /* and check if we need to reply to a read request, 574 * new R5_Wantfill requests are held off until 575 * !STRIPE_BIOFILL_RUN 576 */ 577 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 578 struct bio *rbi, *rbi2; 579 580 BUG_ON(!dev->read); 581 rbi = dev->read; 582 dev->read = NULL; 583 while (rbi && rbi->bi_sector < 584 dev->sector + STRIPE_SECTORS) { 585 rbi2 = r5_next_bio(rbi, dev->sector); 586 if (!raid5_dec_bi_phys_segments(rbi)) { 587 rbi->bi_next = return_bi; 588 return_bi = rbi; 589 } 590 rbi = rbi2; 591 } 592 } 593 } 594 spin_unlock_irq(&conf->device_lock); 595 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 596 597 return_io(return_bi); 598 599 set_bit(STRIPE_HANDLE, &sh->state); 600 release_stripe(sh); 601} 602 603static void ops_run_biofill(struct stripe_head *sh) 604{ 605 struct dma_async_tx_descriptor *tx = NULL; 606 raid5_conf_t *conf = sh->raid_conf; 607 struct async_submit_ctl submit; 608 int i; 609 610 pr_debug("%s: stripe %llu\n", __func__, 611 (unsigned long long)sh->sector); 612 613 for (i = sh->disks; i--; ) { 614 struct r5dev *dev = &sh->dev[i]; 615 if (test_bit(R5_Wantfill, &dev->flags)) { 616 struct bio *rbi; 617 spin_lock_irq(&conf->device_lock); 618 dev->read = rbi = dev->toread; 619 dev->toread = NULL; 620 spin_unlock_irq(&conf->device_lock); 621 while (rbi && rbi->bi_sector < 622 dev->sector + STRIPE_SECTORS) { 623 tx = async_copy_data(0, rbi, dev->page, 624 dev->sector, tx); 625 rbi = r5_next_bio(rbi, dev->sector); 626 } 627 } 628 } 629 630 atomic_inc(&sh->count); 631 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 632 async_trigger_callback(&submit); 633} 634 635static void mark_target_uptodate(struct stripe_head *sh, int target) 636{ 637 struct r5dev *tgt; 638 639 if (target < 0) 640 return; 641 642 tgt = &sh->dev[target]; 643 set_bit(R5_UPTODATE, &tgt->flags); 644 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 645 clear_bit(R5_Wantcompute, &tgt->flags); 646} 647 648static void ops_complete_compute(void *stripe_head_ref) 649{ 650 struct stripe_head *sh = stripe_head_ref; 651 652 pr_debug("%s: stripe %llu\n", __func__, 653 (unsigned long long)sh->sector); 654 655 /* mark the computed target(s) as uptodate */ 656 
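	/*
	 * Editor's note: a target of -1 means "slot unused" and is
	 * ignored by mark_target_uptodate(), letting the one-target
	 * (raid5, compute6_1) and two-target (compute6_2) cases share
	 * this completion path.
	 */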
mark_target_uptodate(sh, sh->ops.target); 657 mark_target_uptodate(sh, sh->ops.target2); 658 659 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 660 if (sh->check_state == check_state_compute_run) 661 sh->check_state = check_state_compute_result; 662 set_bit(STRIPE_HANDLE, &sh->state); 663 release_stripe(sh); 664} 665 666/* return a pointer to the address conversion region of the scribble buffer */ 667static addr_conv_t *to_addr_conv(struct stripe_head *sh, 668 struct raid5_percpu *percpu) 669{ 670 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); 671} 672 673static struct dma_async_tx_descriptor * 674ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 675{ 676 int disks = sh->disks; 677 struct page **xor_srcs = percpu->scribble; 678 int target = sh->ops.target; 679 struct r5dev *tgt = &sh->dev[target]; 680 struct page *xor_dest = tgt->page; 681 int count = 0; 682 struct dma_async_tx_descriptor *tx; 683 struct async_submit_ctl submit; 684 int i; 685 686 pr_debug("%s: stripe %llu block: %d\n", 687 __func__, (unsigned long long)sh->sector, target); 688 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 689 690 for (i = disks; i--; ) 691 if (i != target) 692 xor_srcs[count++] = sh->dev[i].page; 693 694 atomic_inc(&sh->count); 695 696 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 697 ops_complete_compute, sh, to_addr_conv(sh, percpu)); 698 if (unlikely(count == 1)) 699 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 700 else 701 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 702 703 return tx; 704} 705 706/* set_syndrome_sources - populate source buffers for gen_syndrome 707 * @srcs - (struct page *) array of size sh->disks 708 * @sh - stripe_head to parse 709 * 710 * Populates srcs in proper layout order for the stripe and returns the 711 * 'count' of sources to be used in a call to async_gen_syndrome. The P 712 * destination buffer is recorded in srcs[count] and the Q destination 713 * is recorded in srcs[count+1]. 714 */ 715static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) 716{ 717 int disks = sh->disks; 718 int syndrome_disks = sh->ddf_layout ?
disks : (disks - 2); 719 int d0_idx = raid6_d0(sh); 720 int count; 721 int i; 722 723 for (i = 0; i < disks; i++) 724 srcs[i] = NULL; 725 726 count = 0; 727 i = d0_idx; 728 do { 729 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 730 731 srcs[slot] = sh->dev[i].page; 732 i = raid6_next_disk(i, disks); 733 } while (i != d0_idx); 734 735 return syndrome_disks; 736} 737 738static struct dma_async_tx_descriptor * 739ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 740{ 741 int disks = sh->disks; 742 struct page **blocks = percpu->scribble; 743 int target; 744 int qd_idx = sh->qd_idx; 745 struct dma_async_tx_descriptor *tx; 746 struct async_submit_ctl submit; 747 struct r5dev *tgt; 748 struct page *dest; 749 int i; 750 int count; 751 752 if (sh->ops.target < 0) 753 target = sh->ops.target2; 754 else if (sh->ops.target2 < 0) 755 target = sh->ops.target; 756 else 757 /* we should only have one valid target */ 758 BUG(); 759 BUG_ON(target < 0); 760 pr_debug("%s: stripe %llu block: %d\n", 761 __func__, (unsigned long long)sh->sector, target); 762 763 tgt = &sh->dev[target]; 764 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 765 dest = tgt->page; 766 767 atomic_inc(&sh->count); 768 769 if (target == qd_idx) { 770 count = set_syndrome_sources(blocks, sh); 771 blocks[count] = NULL; /* regenerating p is not necessary */ 772 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 773 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 774 ops_complete_compute, sh, 775 to_addr_conv(sh, percpu)); 776 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 777 } else { 778 /* Compute any data- or p-drive using XOR */ 779 count = 0; 780 for (i = disks; i-- ; ) { 781 if (i == target || i == qd_idx) 782 continue; 783 blocks[count++] = sh->dev[i].page; 784 } 785 786 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 787 NULL, ops_complete_compute, sh, 788 to_addr_conv(sh, percpu)); 789 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 790 } 791 792 return tx; 793} 794 795static struct dma_async_tx_descriptor * 796ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 797{ 798 int i, count, disks = sh->disks; 799 int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; 800 int d0_idx = raid6_d0(sh); 801 int faila = -1, failb = -1; 802 int target = sh->ops.target; 803 int target2 = sh->ops.target2; 804 struct r5dev *tgt = &sh->dev[target]; 805 struct r5dev *tgt2 = &sh->dev[target2]; 806 struct dma_async_tx_descriptor *tx; 807 struct page **blocks = percpu->scribble; 808 struct async_submit_ctl submit; 809 810 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 811 __func__, (unsigned long long)sh->sector, target, target2); 812 BUG_ON(target < 0 || target2 < 0); 813 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 814 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 815 816 /* we need to open-code set_syndrome_sources to handle the 817 * slot number conversion for 'faila' and 'failb' 818 */ 819 for (i = 0; i < disks ; i++) 820 blocks[i] = NULL; 821 count = 0; 822 i = d0_idx; 823 do { 824 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 825 826 blocks[slot] = sh->dev[i].page; 827 828 if (i == target) 829 faila = slot; 830 if (i == target2) 831 failb = slot; 832 i = raid6_next_disk(i, disks); 833 } while (i != d0_idx); 834 835 BUG_ON(faila == failb); 836 if (failb < faila) 837 swap(faila, failb); 838 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 839 __func__, (unsigned long long)sh->sector, faila, failb); 840 841 atomic_inc(&sh->count); 842 843 if (failb == syndrome_disks+1) { 844 /* Q disk is one of the missing disks */ 845 if (faila == syndrome_disks) { 846 /* Missing P+Q, just recompute */ 847 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 848 ops_complete_compute, sh, 849 to_addr_conv(sh, percpu)); 850 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 851 STRIPE_SIZE, &submit); 852 } else { 853 struct page *dest; 854 int data_target; 855 int qd_idx = sh->qd_idx; 856 857 /* Missing D+Q: recompute D from P, then recompute Q */ 858 if (target == qd_idx) 859 data_target = target2; 860 else 861 data_target = target; 862 863 count = 0; 864 for (i = disks; i-- ; ) { 865 if (i == data_target || i == qd_idx) 866 continue; 867 blocks[count++] = sh->dev[i].page; 868 } 869 dest = sh->dev[data_target].page; 870 init_async_submit(&submit, 871 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 872 NULL, NULL, NULL, 873 to_addr_conv(sh, percpu)); 874 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 875 &submit); 876 877 count = set_syndrome_sources(blocks, sh); 878 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 879 ops_complete_compute, sh, 880 to_addr_conv(sh, percpu)); 881 return async_gen_syndrome(blocks, 0, count+2, 882 STRIPE_SIZE, &submit); 883 } 884 } else { 885 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 886 ops_complete_compute, sh, 887 to_addr_conv(sh, percpu)); 888 if (failb == syndrome_disks) { 889 /* We're missing D+P. */ 890 return async_raid6_datap_recov(syndrome_disks+2, 891 STRIPE_SIZE, faila, 892 blocks, &submit); 893 } else { 894 /* We're missing D+D. 
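			 * Editor's note: async_raid6_2data_recov()
			 * rebuilds both data blocks at slots faila and
			 * failb from the surviving sources plus P and Q
			 * using RAID-6 Galois-field math.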
*/ 895 return async_raid6_2data_recov(syndrome_disks+2, 896 STRIPE_SIZE, faila, failb, 897 blocks, &submit); 898 } 899 } 900} 901 902 903static void ops_complete_prexor(void *stripe_head_ref) 904{ 905 struct stripe_head *sh = stripe_head_ref; 906 907 pr_debug("%s: stripe %llu\n", __func__, 908 (unsigned long long)sh->sector); 909} 910 911static struct dma_async_tx_descriptor * 912ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, 913 struct dma_async_tx_descriptor *tx) 914{ 915 int disks = sh->disks; 916 struct page **xor_srcs = percpu->scribble; 917 int count = 0, pd_idx = sh->pd_idx, i; 918 struct async_submit_ctl submit; 919 920 /* existing parity data subtracted */ 921 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 922 923 pr_debug("%s: stripe %llu\n", __func__, 924 (unsigned long long)sh->sector); 925 926 for (i = disks; i--; ) { 927 struct r5dev *dev = &sh->dev[i]; 928 /* Only process blocks that are known to be uptodate */ 929 if (test_bit(R5_Wantdrain, &dev->flags)) 930 xor_srcs[count++] = dev->page; 931 } 932 933 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 934 ops_complete_prexor, sh, to_addr_conv(sh, percpu)); 935 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 936 937 return tx; 938} 939 940static struct dma_async_tx_descriptor * 941ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 942{ 943 int disks = sh->disks; 944 int i; 945 946 pr_debug("%s: stripe %llu\n", __func__, 947 (unsigned long long)sh->sector); 948 949 for (i = disks; i--; ) { 950 struct r5dev *dev = &sh->dev[i]; 951 struct bio *chosen; 952 953 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 954 struct bio *wbi; 955 956 spin_lock(&sh->lock); 957 chosen = dev->towrite; 958 dev->towrite = NULL; 959 BUG_ON(dev->written); 960 wbi = dev->written = chosen; 961 spin_unlock(&sh->lock); 962 963 while (wbi && wbi->bi_sector < 964 dev->sector + STRIPE_SECTORS) { 965 tx = async_copy_data(1, wbi, dev->page, 966 dev->sector, tx); 967 wbi = r5_next_bio(wbi, dev->sector); 968 } 969 } 970 } 971 972 return tx; 973} 974 975static void ops_complete_reconstruct(void *stripe_head_ref) 976{ 977 struct stripe_head *sh = stripe_head_ref; 978 int disks = sh->disks; 979 int pd_idx = sh->pd_idx; 980 int qd_idx = sh->qd_idx; 981 int i; 982 983 pr_debug("%s: stripe %llu\n", __func__, 984 (unsigned long long)sh->sector); 985 986 for (i = disks; i--; ) { 987 struct r5dev *dev = &sh->dev[i]; 988 989 if (dev->written || i == pd_idx || i == qd_idx) 990 set_bit(R5_UPTODATE, &dev->flags); 991 } 992 993 if (sh->reconstruct_state == reconstruct_state_drain_run) 994 sh->reconstruct_state = reconstruct_state_drain_result; 995 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 996 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 997 else { 998 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 999 sh->reconstruct_state = reconstruct_state_result; 1000 } 1001 1002 set_bit(STRIPE_HANDLE, &sh->state); 1003 release_stripe(sh); 1004} 1005 1006static void 1007ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1008 struct dma_async_tx_descriptor *tx) 1009{ 1010 int disks = sh->disks; 1011 struct page **xor_srcs = percpu->scribble; 1012 struct async_submit_ctl submit; 1013 int count = 0, pd_idx = sh->pd_idx, i; 1014 struct page *xor_dest; 1015 int prexor = 0; 1016 unsigned long flags; 1017 1018 pr_debug("%s: stripe %llu\n", __func__, 1019 (unsigned long long)sh->sector); 1020 1021 /* check if 
prexor is active which means only process blocks 1022 * that are part of a read-modify-write (written) 1023 */ 1024 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1025 prexor = 1; 1026 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1027 for (i = disks; i--; ) { 1028 struct r5dev *dev = &sh->dev[i]; 1029 if (dev->written) 1030 xor_srcs[count++] = dev->page; 1031 } 1032 } else { 1033 xor_dest = sh->dev[pd_idx].page; 1034 for (i = disks; i--; ) { 1035 struct r5dev *dev = &sh->dev[i]; 1036 if (i != pd_idx) 1037 xor_srcs[count++] = dev->page; 1038 } 1039 } 1040 1041 /* 1/ if we prexor'd then the dest is reused as a source 1042 * 2/ if we did not prexor then we are redoing the parity 1043 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1044 * for the synchronous xor case 1045 */ 1046 flags = ASYNC_TX_ACK | 1047 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1048 1049 atomic_inc(&sh->count); 1050 1051 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, 1052 to_addr_conv(sh, percpu)); 1053 if (unlikely(count == 1)) 1054 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1055 else 1056 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1057} 1058 1059static void 1060ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1061 struct dma_async_tx_descriptor *tx) 1062{ 1063 struct async_submit_ctl submit; 1064 struct page **blocks = percpu->scribble; 1065 int count; 1066 1067 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1068 1069 count = set_syndrome_sources(blocks, sh); 1070 1071 atomic_inc(&sh->count); 1072 1073 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, 1074 sh, to_addr_conv(sh, percpu)); 1075 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1076} 1077 1078static void ops_complete_check(void *stripe_head_ref) 1079{ 1080 struct stripe_head *sh = stripe_head_ref; 1081 1082 pr_debug("%s: stripe %llu\n", __func__, 1083 (unsigned long long)sh->sector); 1084 1085 sh->check_state = check_state_check_result; 1086 set_bit(STRIPE_HANDLE, &sh->state); 1087 release_stripe(sh); 1088} 1089 1090static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1091{ 1092 int disks = sh->disks; 1093 int pd_idx = sh->pd_idx; 1094 int qd_idx = sh->qd_idx; 1095 struct page *xor_dest; 1096 struct page **xor_srcs = percpu->scribble; 1097 struct dma_async_tx_descriptor *tx; 1098 struct async_submit_ctl submit; 1099 int count; 1100 int i; 1101 1102 pr_debug("%s: stripe %llu\n", __func__, 1103 (unsigned long long)sh->sector); 1104 1105 count = 0; 1106 xor_dest = sh->dev[pd_idx].page; 1107 xor_srcs[count++] = xor_dest; 1108 for (i = disks; i--; ) { 1109 if (i == pd_idx || i == qd_idx) 1110 continue; 1111 xor_srcs[count++] = sh->dev[i].page; 1112 } 1113 1114 init_async_submit(&submit, 0, NULL, NULL, NULL, 1115 to_addr_conv(sh, percpu)); 1116 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1117 &sh->ops.zero_sum_result, &submit); 1118 1119 atomic_inc(&sh->count); 1120 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1121 tx = async_trigger_callback(&submit); 1122} 1123 1124static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1125{ 1126 struct page **srcs = percpu->scribble; 1127 struct async_submit_ctl submit; 1128 int count; 1129 1130 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1131 (unsigned long long)sh->sector, checkp); 1132 1133 count = 
set_syndrome_sources(srcs, sh); 1134 if (!checkp) 1135 srcs[count] = NULL; 1136 1137 atomic_inc(&sh->count); 1138 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1139 sh, to_addr_conv(sh, percpu)); 1140 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1141 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1142} 1143 1144static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1145{ 1146 int overlap_clear = 0, i, disks = sh->disks; 1147 struct dma_async_tx_descriptor *tx = NULL; 1148 raid5_conf_t *conf = sh->raid_conf; 1149 int level = conf->level; 1150 struct raid5_percpu *percpu; 1151 unsigned long cpu; 1152 1153 cpu = get_cpu(); 1154 percpu = per_cpu_ptr(conf->percpu, cpu); 1155 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1156 ops_run_biofill(sh); 1157 overlap_clear++; 1158 } 1159 1160 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1161 if (level < 6) 1162 tx = ops_run_compute5(sh, percpu); 1163 else { 1164 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1165 tx = ops_run_compute6_1(sh, percpu); 1166 else 1167 tx = ops_run_compute6_2(sh, percpu); 1168 } 1169 /* terminate the chain if reconstruct is not set to be run */ 1170 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1171 async_tx_ack(tx); 1172 } 1173 1174 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 1175 tx = ops_run_prexor(sh, percpu, tx); 1176 1177 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1178 tx = ops_run_biodrain(sh, tx); 1179 overlap_clear++; 1180 } 1181 1182 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1183 if (level < 6) 1184 ops_run_reconstruct5(sh, percpu, tx); 1185 else 1186 ops_run_reconstruct6(sh, percpu, tx); 1187 } 1188 1189 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1190 if (sh->check_state == check_state_run) 1191 ops_run_check_p(sh, percpu); 1192 else if (sh->check_state == check_state_run_q) 1193 ops_run_check_pq(sh, percpu, 0); 1194 else if (sh->check_state == check_state_run_pq) 1195 ops_run_check_pq(sh, percpu, 1); 1196 else 1197 BUG(); 1198 } 1199 1200 if (overlap_clear) 1201 for (i = disks; i--; ) { 1202 struct r5dev *dev = &sh->dev[i]; 1203 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1204 wake_up(&sh->raid_conf->wait_for_overlap); 1205 } 1206 put_cpu(); 1207} 1208 1209#ifdef CONFIG_MULTICORE_RAID456 1210static void async_run_ops(void *param, async_cookie_t cookie) 1211{ 1212 struct stripe_head *sh = param; 1213 unsigned long ops_request = sh->ops.request; 1214 1215 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); 1216 wake_up(&sh->ops.wait_for_ops); 1217 1218 __raid_run_ops(sh, ops_request); 1219 release_stripe(sh); 1220} 1221 1222static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1223{ 1224 /* since handle_stripe can be called outside of raid5d context 1225 * we need to ensure sh->ops.request is de-staged before another 1226 * request arrives 1227 */ 1228 wait_event(sh->ops.wait_for_ops, 1229 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); 1230 sh->ops.request = ops_request; 1231 1232 atomic_inc(&sh->count); 1233 async_schedule(async_run_ops, sh); 1234} 1235#else 1236#define raid_run_ops __raid_run_ops 1237#endif 1238 1239static int grow_one_stripe(raid5_conf_t *conf) 1240{ 1241 struct stripe_head *sh; 1242 int disks = max(conf->raid_disks, conf->previous_raid_disks); 1243 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 1244 if (!sh) 1245 return 0; 1246 memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev)); 1247 sh->raid_conf = conf; 1248 
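	/*
	 * Editor's note: struct stripe_head ends in a one-element
	 * r5dev array, which is why the allocation and the memset
	 * above size the object as sizeof(*sh) plus (disks - 1)
	 * additional struct r5dev entries.
	 */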
spin_lock_init(&sh->lock); 1249 #ifdef CONFIG_MULTICORE_RAID456 1250 init_waitqueue_head(&sh->ops.wait_for_ops); 1251 #endif 1252 1253 if (grow_buffers(sh, disks)) { 1254 shrink_buffers(sh, disks); 1255 kmem_cache_free(conf->slab_cache, sh); 1256 return 0; 1257 } 1258 /* we just created an active stripe so... */ 1259 atomic_set(&sh->count, 1); 1260 atomic_inc(&conf->active_stripes); 1261 INIT_LIST_HEAD(&sh->lru); 1262 release_stripe(sh); 1263 return 1; 1264} 1265 1266static int grow_stripes(raid5_conf_t *conf, int num) 1267{ 1268 struct kmem_cache *sc; 1269 int devs = max(conf->raid_disks, conf->previous_raid_disks); 1270 1271 sprintf(conf->cache_name[0], 1272 "raid%d-%s", conf->level, mdname(conf->mddev)); 1273 sprintf(conf->cache_name[1], 1274 "raid%d-%s-alt", conf->level, mdname(conf->mddev)); 1275 conf->active_name = 0; 1276 sc = kmem_cache_create(conf->cache_name[conf->active_name], 1277 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 1278 0, 0, NULL); 1279 if (!sc) 1280 return 1; 1281 conf->slab_cache = sc; 1282 conf->pool_size = devs; 1283 while (num--) 1284 if (!grow_one_stripe(conf)) 1285 return 1; 1286 return 0; 1287} 1288 1289/** 1290 * scribble_len - return the required size of the scribble region 1291 * @num - total number of disks in the array 1292 * 1293 * The size must be enough to contain: 1294 * 1/ a struct page pointer for each device in the array +2 1295 * 2/ room to convert each entry in (1) to its corresponding dma 1296 * (dma_map_page()) or page (page_address()) address. 1297 * 1298 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 1299 * calculate over all devices (not just the data blocks), using zeros in place 1300 * of the P and Q blocks. 1301 */ 1302static size_t scribble_len(int num) 1303{ 1304 size_t len; 1305 1306 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 1307 1308 return len; 1309} 1310 1311static int resize_stripes(raid5_conf_t *conf, int newsize) 1312{ 1313 /* Make all the stripes able to hold 'newsize' devices. 1314 * New slots in each stripe get 'page' set to a new page. 1315 * 1316 * This happens in stages: 1317 * 1/ create a new kmem_cache and allocate the required number of 1318 * stripe_heads. 1319 * 2/ gather all the old stripe_heads and transfer the pages across 1320 * to the new stripe_heads. This will have the side effect of 1321 * freezing the array as once all stripe_heads have been collected, 1322 * no IO will be possible. Old stripe heads are freed once their 1323 * pages have been transferred over, and the old kmem_cache is 1324 * freed when all stripes are done. 1325 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 1326 * we simply return a failure status - no need to clean anything up. 1327 * 4/ allocate new pages for the new slots in the new stripe_heads. 1328 * If this fails, we don't bother trying to shrink the 1329 * stripe_heads down again, we just leave them as they are. 1330 * As each stripe_head is processed the new one is released into 1331 * active service. 1332 * 1333 * Once step 2 is started, we cannot afford to wait for a write, 1334 * so we use GFP_NOIO allocations.
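	 *
	 * For example (editor's illustration): growing a 4-device array to
	 * 5 allocates a new slab whose objects carry 5 r5dev slots, migrates
	 * the 4 existing pages of each stripe_head in step 2, and only
	 * step 4 allocates the one genuinely new page per stripe.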
1335 */ 1336 struct stripe_head *osh, *nsh; 1337 LIST_HEAD(newstripes); 1338 struct disk_info *ndisks; 1339 unsigned long cpu; 1340 int err; 1341 struct kmem_cache *sc; 1342 int i; 1343 1344 if (newsize <= conf->pool_size) 1345 return 0; /* never bother to shrink */ 1346 1347 err = md_allow_write(conf->mddev); 1348 if (err) 1349 return err; 1350 1351 /* Step 1 */ 1352 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1353 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1354 0, 0, NULL); 1355 if (!sc) 1356 return -ENOMEM; 1357 1358 for (i = conf->max_nr_stripes; i; i--) { 1359 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 1360 if (!nsh) 1361 break; 1362 1363 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1364 1365 nsh->raid_conf = conf; 1366 spin_lock_init(&nsh->lock); 1367 #ifdef CONFIG_MULTICORE_RAID456 1368 init_waitqueue_head(&nsh->ops.wait_for_ops); 1369 #endif 1370 1371 list_add(&nsh->lru, &newstripes); 1372 } 1373 if (i) { 1374 /* didn't get enough, give up */ 1375 while (!list_empty(&newstripes)) { 1376 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1377 list_del(&nsh->lru); 1378 kmem_cache_free(sc, nsh); 1379 } 1380 kmem_cache_destroy(sc); 1381 return -ENOMEM; 1382 } 1383 /* Step 2 - Must use GFP_NOIO now. 1384 * OK, we have enough stripes, start collecting inactive 1385 * stripes and copying them over 1386 */ 1387 list_for_each_entry(nsh, &newstripes, lru) { 1388 spin_lock_irq(&conf->device_lock); 1389 wait_event_lock_irq(conf->wait_for_stripe, 1390 !list_empty(&conf->inactive_list), 1391 conf->device_lock, 1392 unplug_slaves(conf->mddev) 1393 ); 1394 osh = get_free_stripe(conf); 1395 spin_unlock_irq(&conf->device_lock); 1396 atomic_set(&nsh->count, 1); 1397 for(i=0; i<conf->pool_size; i++) 1398 nsh->dev[i].page = osh->dev[i].page; 1399 for( ; i<newsize; i++) 1400 nsh->dev[i].page = NULL; 1401 kmem_cache_free(conf->slab_cache, osh); 1402 } 1403 kmem_cache_destroy(conf->slab_cache); 1404 1405 /* Step 3. 
1406 * At this point, we are holding all the stripes so the array 1407 * is completely stalled, so now is a good time to resize 1408 * conf->disks and the scribble region 1409 */ 1410 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1411 if (ndisks) { 1412 for (i=0; i<conf->raid_disks; i++) 1413 ndisks[i] = conf->disks[i]; 1414 kfree(conf->disks); 1415 conf->disks = ndisks; 1416 } else 1417 err = -ENOMEM; 1418 1419 get_online_cpus(); 1420 conf->scribble_len = scribble_len(newsize); 1421 for_each_present_cpu(cpu) { 1422 struct raid5_percpu *percpu; 1423 void *scribble; 1424 1425 percpu = per_cpu_ptr(conf->percpu, cpu); 1426 scribble = kmalloc(conf->scribble_len, GFP_NOIO); 1427 1428 if (scribble) { 1429 kfree(percpu->scribble); 1430 percpu->scribble = scribble; 1431 } else { 1432 err = -ENOMEM; 1433 break; 1434 } 1435 } 1436 put_online_cpus(); 1437 1438 /* Step 4, return new stripes to service */ 1439 while(!list_empty(&newstripes)) { 1440 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1441 list_del_init(&nsh->lru); 1442 1443 for (i=conf->raid_disks; i < newsize; i++) 1444 if (nsh->dev[i].page == NULL) { 1445 struct page *p = alloc_page(GFP_NOIO); 1446 nsh->dev[i].page = p; 1447 if (!p) 1448 err = -ENOMEM; 1449 } 1450 release_stripe(nsh); 1451 } 1452 /* critical section passed, GFP_NOIO no longer needed */ 1453 1454 conf->slab_cache = sc; 1455 conf->active_name = 1-conf->active_name; 1456 conf->pool_size = newsize; 1457 return err; 1458} 1459 1460static int drop_one_stripe(raid5_conf_t *conf) 1461{ 1462 struct stripe_head *sh; 1463 1464 spin_lock_irq(&conf->device_lock); 1465 sh = get_free_stripe(conf); 1466 spin_unlock_irq(&conf->device_lock); 1467 if (!sh) 1468 return 0; 1469 BUG_ON(atomic_read(&sh->count)); 1470 shrink_buffers(sh, conf->pool_size); 1471 kmem_cache_free(conf->slab_cache, sh); 1472 atomic_dec(&conf->active_stripes); 1473 return 1; 1474} 1475 1476static void shrink_stripes(raid5_conf_t *conf) 1477{ 1478 while (drop_one_stripe(conf)) 1479 ; 1480 1481 if (conf->slab_cache) 1482 kmem_cache_destroy(conf->slab_cache); 1483 conf->slab_cache = NULL; 1484} 1485 1486static void raid5_end_read_request(struct bio * bi, int error) 1487{ 1488 struct stripe_head *sh = bi->bi_private; 1489 raid5_conf_t *conf = sh->raid_conf; 1490 int disks = sh->disks, i; 1491 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1492 char b[BDEVNAME_SIZE]; 1493 mdk_rdev_t *rdev; 1494 1495 1496 for (i=0 ; i<disks; i++) 1497 if (bi == &sh->dev[i].req) 1498 break; 1499 1500 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1501 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1502 uptodate); 1503 if (i == disks) { 1504 BUG(); 1505 return; 1506 } 1507 1508 if (uptodate) { 1509 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1510 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1511 rdev = conf->disks[i].rdev; 1512 printk_rl(KERN_INFO "raid5:%s: read error corrected" 1513 " (%lu sectors at %llu on %s)\n", 1514 mdname(conf->mddev), STRIPE_SECTORS, 1515 (unsigned long long)(sh->sector 1516 + rdev->data_offset), 1517 bdevname(rdev->bdev, b)); 1518 clear_bit(R5_ReadError, &sh->dev[i].flags); 1519 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1520 } 1521 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1522 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1523 } else { 1524 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1525 int retry = 0; 1526 rdev = conf->disks[i].rdev; 1527 1528 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1529 atomic_inc(&rdev->read_errors);
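		/*
		 * Editor's summary of the ladder below: a read error on a
		 * degraded array, on a block that was already re-written
		 * once (R5_ReWrite), or after more errors than
		 * max_nr_stripes fails the device via md_error(); anything
		 * milder merely sets R5_ReadError so the block is retried.
		 */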
1530 if (conf->mddev->degraded) 1531 printk_rl(KERN_WARNING 1532 "raid5:%s: read error not correctable " 1533 "(sector %llu on %s).\n", 1534 mdname(conf->mddev), 1535 (unsigned long long)(sh->sector 1536 + rdev->data_offset), 1537 bdn); 1538 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1539 /* Oh, no!!! */ 1540 printk_rl(KERN_WARNING 1541 "raid5:%s: read error NOT corrected!! " 1542 "(sector %llu on %s).\n", 1543 mdname(conf->mddev), 1544 (unsigned long long)(sh->sector 1545 + rdev->data_offset), 1546 bdn); 1547 else if (atomic_read(&rdev->read_errors) 1548 > conf->max_nr_stripes) 1549 printk(KERN_WARNING 1550 "raid5:%s: Too many read errors, failing device %s.\n", 1551 mdname(conf->mddev), bdn); 1552 else 1553 retry = 1; 1554 if (retry) 1555 set_bit(R5_ReadError, &sh->dev[i].flags); 1556 else { 1557 clear_bit(R5_ReadError, &sh->dev[i].flags); 1558 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1559 md_error(conf->mddev, rdev); 1560 } 1561 } 1562 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1563 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1564 set_bit(STRIPE_HANDLE, &sh->state); 1565 release_stripe(sh); 1566} 1567 1568static void raid5_end_write_request(struct bio *bi, int error) 1569{ 1570 struct stripe_head *sh = bi->bi_private; 1571 raid5_conf_t *conf = sh->raid_conf; 1572 int disks = sh->disks, i; 1573 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1574 1575 for (i=0 ; i<disks; i++) 1576 if (bi == &sh->dev[i].req) 1577 break; 1578 1579 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1580 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1581 uptodate); 1582 if (i == disks) { 1583 BUG(); 1584 return; 1585 } 1586 1587 if (!uptodate) 1588 md_error(conf->mddev, conf->disks[i].rdev); 1589 1590 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1591 1592 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1593 set_bit(STRIPE_HANDLE, &sh->state); 1594 release_stripe(sh); 1595} 1596 1597 1598static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1599 1600static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1601{ 1602 struct r5dev *dev = &sh->dev[i]; 1603 1604 bio_init(&dev->req); 1605 dev->req.bi_io_vec = &dev->vec; 1606 dev->req.bi_vcnt++; 1607 dev->req.bi_max_vecs++; 1608 dev->vec.bv_page = dev->page; 1609 dev->vec.bv_len = STRIPE_SIZE; 1610 dev->vec.bv_offset = 0; 1611 1612 dev->req.bi_sector = sh->sector; 1613 dev->req.bi_private = sh; 1614 1615 dev->flags = 0; 1616 dev->sector = compute_blocknr(sh, i, previous); 1617} 1618 1619static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1620{ 1621 char b[BDEVNAME_SIZE]; 1622 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1623 pr_debug("raid5: error called\n"); 1624 1625 if (!test_bit(Faulty, &rdev->flags)) { 1626 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1627 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1628 unsigned long flags; 1629 spin_lock_irqsave(&conf->device_lock, flags); 1630 mddev->degraded++; 1631 spin_unlock_irqrestore(&conf->device_lock, flags); 1632 /* 1633 * if recovery was running, make sure it aborts. 1634 */ 1635 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1636 } 1637 set_bit(Faulty, &rdev->flags); 1638 printk(KERN_ALERT 1639 "raid5: Disk failure on %s, disabling device.\n" 1640 "raid5: Operation continuing on %d devices.\n", 1641 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1642 } 1643} 1644 1645/* 1646 * Input: a 'big' sector number, 1647 * Output: index of the data and parity disk, and the sector # in them. 
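 *
 * Worked example (editor's illustration, geometry assumed): on a
 * 5-device raid5 with 64KiB chunks (128 sectors) and the left-symmetric
 * layout, r_sector 1000 splits into chunk_number 7 and chunk_offset 104,
 * so stripe == 1 and dd_idx == 3; pd_idx becomes 4 - 1 == 3, dd_idx maps
 * to (3 + 1 + 3) % 5 == 2, and the block lands on device 2 at
 * new_sector 1 * 128 + 104 == 232.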
1648 */ 1649static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, 1650 int previous, int *dd_idx, 1651 struct stripe_head *sh) 1652{ 1653 sector_t stripe; 1654 sector_t chunk_number; 1655 unsigned int chunk_offset; 1656 int pd_idx, qd_idx; 1657 int ddf_layout = 0; 1658 sector_t new_sector; 1659 int algorithm = previous ? conf->prev_algo 1660 : conf->algorithm; 1661 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1662 : conf->chunk_sectors; 1663 int raid_disks = previous ? conf->previous_raid_disks 1664 : conf->raid_disks; 1665 int data_disks = raid_disks - conf->max_degraded; 1666 1667 /* First compute the information on this sector */ 1668 1669 /* 1670 * Compute the chunk number and the sector offset inside the chunk 1671 */ 1672 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1673 chunk_number = r_sector; 1674 1675 /* 1676 * Compute the stripe number 1677 */ 1678 stripe = chunk_number; 1679 *dd_idx = sector_div(stripe, data_disks); 1680 1681 /* 1682 * Select the parity disk based on the user-selected algorithm. 1683 */ 1684 pd_idx = qd_idx = ~0; 1685 switch(conf->level) { 1686 case 4: 1687 pd_idx = data_disks; 1688 break; 1689 case 5: 1690 switch (algorithm) { 1691 case ALGORITHM_LEFT_ASYMMETRIC: 1692 pd_idx = data_disks - stripe % raid_disks; 1693 if (*dd_idx >= pd_idx) 1694 (*dd_idx)++; 1695 break; 1696 case ALGORITHM_RIGHT_ASYMMETRIC: 1697 pd_idx = stripe % raid_disks; 1698 if (*dd_idx >= pd_idx) 1699 (*dd_idx)++; 1700 break; 1701 case ALGORITHM_LEFT_SYMMETRIC: 1702 pd_idx = data_disks - stripe % raid_disks; 1703 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1704 break; 1705 case ALGORITHM_RIGHT_SYMMETRIC: 1706 pd_idx = stripe % raid_disks; 1707 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1708 break; 1709 case ALGORITHM_PARITY_0: 1710 pd_idx = 0; 1711 (*dd_idx)++; 1712 break; 1713 case ALGORITHM_PARITY_N: 1714 pd_idx = data_disks; 1715 break; 1716 default: 1717 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1718 algorithm); 1719 BUG(); 1720 } 1721 break; 1722 case 6: 1723 1724 switch (algorithm) { 1725 case ALGORITHM_LEFT_ASYMMETRIC: 1726 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1727 qd_idx = pd_idx + 1; 1728 if (pd_idx == raid_disks-1) { 1729 (*dd_idx)++; /* Q D D D P */ 1730 qd_idx = 0; 1731 } else if (*dd_idx >= pd_idx) 1732 (*dd_idx) += 2; /* D D P Q D */ 1733 break; 1734 case ALGORITHM_RIGHT_ASYMMETRIC: 1735 pd_idx = stripe % raid_disks; 1736 qd_idx = pd_idx + 1; 1737 if (pd_idx == raid_disks-1) { 1738 (*dd_idx)++; /* Q D D D P */ 1739 qd_idx = 0; 1740 } else if (*dd_idx >= pd_idx) 1741 (*dd_idx) += 2; /* D D P Q D */ 1742 break; 1743 case ALGORITHM_LEFT_SYMMETRIC: 1744 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1745 qd_idx = (pd_idx + 1) % raid_disks; 1746 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1747 break; 1748 case ALGORITHM_RIGHT_SYMMETRIC: 1749 pd_idx = stripe % raid_disks; 1750 qd_idx = (pd_idx + 1) % raid_disks; 1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1752 break; 1753 1754 case ALGORITHM_PARITY_0: 1755 pd_idx = 0; 1756 qd_idx = 1; 1757 (*dd_idx) += 2; 1758 break; 1759 case ALGORITHM_PARITY_N: 1760 pd_idx = data_disks; 1761 qd_idx = data_disks + 1; 1762 break; 1763 1764 case ALGORITHM_ROTATING_ZERO_RESTART: 1765 /* Exactly the same as RIGHT_ASYMMETRIC, but the order 1766 * of blocks for computing Q is different.
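			 * (Editor's note: in the DDF layouts the syndrome
			 * sources are taken in plain device order, with
			 * zeros standing in for the P and Q positions; see
			 * set_syndrome_sources() and the scribble_len()
			 * comment.)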
1767 */ 1768 pd_idx = stripe % raid_disks; 1769 qd_idx = pd_idx + 1; 1770 if (pd_idx == raid_disks-1) { 1771 (*dd_idx)++; /* Q D D D P */ 1772 qd_idx = 0; 1773 } else if (*dd_idx >= pd_idx) 1774 (*dd_idx) += 2; /* D D P Q D */ 1775 ddf_layout = 1; 1776 break; 1777 1778 case ALGORITHM_ROTATING_N_RESTART: 1779 /* Same as left_asymmetric, but first stripe is 1780 * D D D P Q rather than 1781 * Q D D D P 1782 */ 1783 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1784 qd_idx = pd_idx + 1; 1785 if (pd_idx == raid_disks-1) { 1786 (*dd_idx)++; /* Q D D D P */ 1787 qd_idx = 0; 1788 } else if (*dd_idx >= pd_idx) 1789 (*dd_idx) += 2; /* D D P Q D */ 1790 ddf_layout = 1; 1791 break; 1792 1793 case ALGORITHM_ROTATING_N_CONTINUE: 1794 /* Same as left_symmetric but Q is before P */ 1795 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1796 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1797 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1798 ddf_layout = 1; 1799 break; 1800 1801 case ALGORITHM_LEFT_ASYMMETRIC_6: 1802 /* RAID5 left_asymmetric, with Q on last device */ 1803 pd_idx = data_disks - stripe % (raid_disks-1); 1804 if (*dd_idx >= pd_idx) 1805 (*dd_idx)++; 1806 qd_idx = raid_disks - 1; 1807 break; 1808 1809 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1810 pd_idx = stripe % (raid_disks-1); 1811 if (*dd_idx >= pd_idx) 1812 (*dd_idx)++; 1813 qd_idx = raid_disks - 1; 1814 break; 1815 1816 case ALGORITHM_LEFT_SYMMETRIC_6: 1817 pd_idx = data_disks - stripe % (raid_disks-1); 1818 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1819 qd_idx = raid_disks - 1; 1820 break; 1821 1822 case ALGORITHM_RIGHT_SYMMETRIC_6: 1823 pd_idx = stripe % (raid_disks-1); 1824 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1825 qd_idx = raid_disks - 1; 1826 break; 1827 1828 case ALGORITHM_PARITY_0_6: 1829 pd_idx = 0; 1830 (*dd_idx)++; 1831 qd_idx = raid_disks - 1; 1832 break; 1833 1834 1835 default: 1836 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1837 algorithm); 1838 BUG(); 1839 } 1840 break; 1841 } 1842 1843 if (sh) { 1844 sh->pd_idx = pd_idx; 1845 sh->qd_idx = qd_idx; 1846 sh->ddf_layout = ddf_layout; 1847 } 1848 /* 1849 * Finally, compute the new sector number 1850 */ 1851 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1852 return new_sector; 1853} 1854 1855 1856static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 1857{ 1858 raid5_conf_t *conf = sh->raid_conf; 1859 int raid_disks = sh->disks; 1860 int data_disks = raid_disks - conf->max_degraded; 1861 sector_t new_sector = sh->sector, check; 1862 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1863 : conf->chunk_sectors; 1864 int algorithm = previous ?
conf->prev_algo 1865 : conf->algorithm; 1866 sector_t stripe; 1867 int chunk_offset; 1868 sector_t chunk_number; 1869 int dummy1, dd_idx = i; 1870 sector_t r_sector; 1871 struct stripe_head sh2; 1872 1873 1874 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1875 stripe = new_sector; 1876 1877 if (i == sh->pd_idx) 1878 return 0; 1879 switch(conf->level) { 1880 case 4: break; 1881 case 5: 1882 switch (algorithm) { 1883 case ALGORITHM_LEFT_ASYMMETRIC: 1884 case ALGORITHM_RIGHT_ASYMMETRIC: 1885 if (i > sh->pd_idx) 1886 i--; 1887 break; 1888 case ALGORITHM_LEFT_SYMMETRIC: 1889 case ALGORITHM_RIGHT_SYMMETRIC: 1890 if (i < sh->pd_idx) 1891 i += raid_disks; 1892 i -= (sh->pd_idx + 1); 1893 break; 1894 case ALGORITHM_PARITY_0: 1895 i -= 1; 1896 break; 1897 case ALGORITHM_PARITY_N: 1898 break; 1899 default: 1900 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1901 algorithm); 1902 BUG(); 1903 } 1904 break; 1905 case 6: 1906 if (i == sh->qd_idx) 1907 return 0; /* It is the Q disk */ 1908 switch (algorithm) { 1909 case ALGORITHM_LEFT_ASYMMETRIC: 1910 case ALGORITHM_RIGHT_ASYMMETRIC: 1911 case ALGORITHM_ROTATING_ZERO_RESTART: 1912 case ALGORITHM_ROTATING_N_RESTART: 1913 if (sh->pd_idx == raid_disks-1) 1914 i--; /* Q D D D P */ 1915 else if (i > sh->pd_idx) 1916 i -= 2; /* D D P Q D */ 1917 break; 1918 case ALGORITHM_LEFT_SYMMETRIC: 1919 case ALGORITHM_RIGHT_SYMMETRIC: 1920 if (sh->pd_idx == raid_disks-1) 1921 i--; /* Q D D D P */ 1922 else { 1923 /* D D P Q D */ 1924 if (i < sh->pd_idx) 1925 i += raid_disks; 1926 i -= (sh->pd_idx + 2); 1927 } 1928 break; 1929 case ALGORITHM_PARITY_0: 1930 i -= 2; 1931 break; 1932 case ALGORITHM_PARITY_N: 1933 break; 1934 case ALGORITHM_ROTATING_N_CONTINUE: 1935 /* Like left_symmetric, but P is before Q */ 1936 if (sh->pd_idx == 0) 1937 i--; /* P D D D Q */ 1938 else { 1939 /* D D Q P D */ 1940 if (i < sh->pd_idx) 1941 i += raid_disks; 1942 i -= (sh->pd_idx + 1); 1943 } 1944 break; 1945 case ALGORITHM_LEFT_ASYMMETRIC_6: 1946 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1947 if (i > sh->pd_idx) 1948 i--; 1949 break; 1950 case ALGORITHM_LEFT_SYMMETRIC_6: 1951 case ALGORITHM_RIGHT_SYMMETRIC_6: 1952 if (i < sh->pd_idx) 1953 i += data_disks + 1; 1954 i -= (sh->pd_idx + 1); 1955 break; 1956 case ALGORITHM_PARITY_0_6: 1957 i -= 1; 1958 break; 1959 default: 1960 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1961 algorithm); 1962 BUG(); 1963 } 1964 break; 1965 } 1966 1967 chunk_number = stripe * data_disks + i; 1968 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 1969 1970 check = raid5_compute_sector(conf, r_sector, 1971 previous, &dummy1, &sh2); 1972 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 1973 || sh2.qd_idx != sh->qd_idx) { 1974 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1975 return 0; 1976 } 1977 return r_sector; 1978} 1979 1980 1981static void 1982schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 1983 int rcw, int expand) 1984{ 1985 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1986 raid5_conf_t *conf = sh->raid_conf; 1987 int level = conf->level; 1988 1989 if (rcw) { 1990 /* if we are not expanding this is a proper write request, and 1991 * there will be bios with new data to be drained into the 1992 * stripe cache 1993 */ 1994 if (!expand) { 1995 sh->reconstruct_state = reconstruct_state_drain_run; 1996 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1997 } else 1998 sh->reconstruct_state = reconstruct_state_run; 1999 2000 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2001 
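		/*
		 * Editor's note: this is the reconstruct-write branch; every
		 * device with new data is locked and marked R5_Wantdrain so
		 * the bios are copied in before parity is recomputed from
		 * the whole stripe. The else branch below is the
		 * read-modify-write path, which first subtracts the old data
		 * from the parity (PREXOR).
		 */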
2002 for (i = disks; i--; ) { 2003 struct r5dev *dev = &sh->dev[i]; 2004 2005 if (dev->towrite) { 2006 set_bit(R5_LOCKED, &dev->flags); 2007 set_bit(R5_Wantdrain, &dev->flags); 2008 if (!expand) 2009 clear_bit(R5_UPTODATE, &dev->flags); 2010 s->locked++; 2011 } 2012 } 2013 if (s->locked + conf->max_degraded == disks) 2014 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2015 atomic_inc(&conf->pending_full_writes); 2016 } else { 2017 BUG_ON(level == 6); 2018 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2019 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2020 2021 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2022 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2023 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2024 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2025 2026 for (i = disks; i--; ) { 2027 struct r5dev *dev = &sh->dev[i]; 2028 if (i == pd_idx) 2029 continue; 2030 2031 if (dev->towrite && 2032 (test_bit(R5_UPTODATE, &dev->flags) || 2033 test_bit(R5_Wantcompute, &dev->flags))) { 2034 set_bit(R5_Wantdrain, &dev->flags); 2035 set_bit(R5_LOCKED, &dev->flags); 2036 clear_bit(R5_UPTODATE, &dev->flags); 2037 s->locked++; 2038 } 2039 } 2040 } 2041 2042 /* keep the parity disk(s) locked while asynchronous operations 2043 * are in flight 2044 */ 2045 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2046 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2047 s->locked++; 2048 2049 if (level == 6) { 2050 int qd_idx = sh->qd_idx; 2051 struct r5dev *dev = &sh->dev[qd_idx]; 2052 2053 set_bit(R5_LOCKED, &dev->flags); 2054 clear_bit(R5_UPTODATE, &dev->flags); 2055 s->locked++; 2056 } 2057 2058 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2059 __func__, (unsigned long long)sh->sector, 2060 s->locked, s->ops_request); 2061} 2062 2063/* 2064 * Each stripe/dev can have one or more bion attached. 2065 * toread/towrite point to the first in a chain. 2066 * The bi_next chain must be in order. 
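 * For example (sector numbers purely illustrative): two 2-sector writes
 * to the same stripe+device, one at sector 8 and one at sector 10, sit on
 * the chain in ascending bi_sector order; add_stripe_bio() below rejects
 * a third bio that would overlap either of them.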
2067 */
2068 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2069 {
2070 struct bio **bip;
2071 raid5_conf_t *conf = sh->raid_conf;
2072 int firstwrite=0;
2073
2074 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2075 (unsigned long long)bi->bi_sector,
2076 (unsigned long long)sh->sector);
2077
2078
2079 spin_lock(&sh->lock);
2080 spin_lock_irq(&conf->device_lock);
2081 if (forwrite) {
2082 bip = &sh->dev[dd_idx].towrite;
2083 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2084 firstwrite = 1;
2085 } else
2086 bip = &sh->dev[dd_idx].toread;
2087 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2088 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2089 goto overlap;
2090 bip = & (*bip)->bi_next;
2091 }
2092 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2093 goto overlap;
2094
2095 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2096 if (*bip)
2097 bi->bi_next = *bip;
2098 *bip = bi;
2099 bi->bi_phys_segments++;
2100 spin_unlock_irq(&conf->device_lock);
2101 spin_unlock(&sh->lock);
2102
2103 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2104 (unsigned long long)bi->bi_sector,
2105 (unsigned long long)sh->sector, dd_idx);
2106
2107 if (conf->mddev->bitmap && firstwrite) {
2108 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2109 STRIPE_SECTORS, 0);
2110 sh->bm_seq = conf->seq_flush+1;
2111 set_bit(STRIPE_BIT_DELAY, &sh->state);
2112 }
2113
2114 if (forwrite) {
2115 /* check if page is covered */
2116 sector_t sector = sh->dev[dd_idx].sector;
2117 for (bi=sh->dev[dd_idx].towrite;
2118 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2119 bi && bi->bi_sector <= sector;
2120 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2121 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2122 sector = bi->bi_sector + (bi->bi_size>>9);
2123 }
2124 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2125 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2126 }
2127 return 1;
2128
2129 overlap:
2130 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2131 spin_unlock_irq(&conf->device_lock);
2132 spin_unlock(&sh->lock);
2133 return 0;
2134 }
2135
2136 static void end_reshape(raid5_conf_t *conf);
2137
2138 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2139 struct stripe_head *sh)
2140 {
2141 int sectors_per_chunk =
2142 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2143 int dd_idx;
2144 int chunk_offset = sector_div(stripe, sectors_per_chunk);
2145 int disks = previous ?
conf->previous_raid_disks : conf->raid_disks; 2146 2147 raid5_compute_sector(conf, 2148 stripe * (disks - conf->max_degraded) 2149 *sectors_per_chunk + chunk_offset, 2150 previous, 2151 &dd_idx, sh); 2152} 2153 2154static void 2155handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 2156 struct stripe_head_state *s, int disks, 2157 struct bio **return_bi) 2158{ 2159 int i; 2160 for (i = disks; i--; ) { 2161 struct bio *bi; 2162 int bitmap_end = 0; 2163 2164 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2165 mdk_rdev_t *rdev; 2166 rcu_read_lock(); 2167 rdev = rcu_dereference(conf->disks[i].rdev); 2168 if (rdev && test_bit(In_sync, &rdev->flags)) 2169 /* multiple read failures in one stripe */ 2170 md_error(conf->mddev, rdev); 2171 rcu_read_unlock(); 2172 } 2173 spin_lock_irq(&conf->device_lock); 2174 /* fail all writes first */ 2175 bi = sh->dev[i].towrite; 2176 sh->dev[i].towrite = NULL; 2177 if (bi) { 2178 s->to_write--; 2179 bitmap_end = 1; 2180 } 2181 2182 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2183 wake_up(&conf->wait_for_overlap); 2184 2185 while (bi && bi->bi_sector < 2186 sh->dev[i].sector + STRIPE_SECTORS) { 2187 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2188 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2189 if (!raid5_dec_bi_phys_segments(bi)) { 2190 md_write_end(conf->mddev); 2191 bi->bi_next = *return_bi; 2192 *return_bi = bi; 2193 } 2194 bi = nextbi; 2195 } 2196 /* and fail all 'written' */ 2197 bi = sh->dev[i].written; 2198 sh->dev[i].written = NULL; 2199 if (bi) bitmap_end = 1; 2200 while (bi && bi->bi_sector < 2201 sh->dev[i].sector + STRIPE_SECTORS) { 2202 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2203 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2204 if (!raid5_dec_bi_phys_segments(bi)) { 2205 md_write_end(conf->mddev); 2206 bi->bi_next = *return_bi; 2207 *return_bi = bi; 2208 } 2209 bi = bi2; 2210 } 2211 2212 /* fail any reads if this device is non-operational and 2213 * the data has not reached the cache yet. 2214 */ 2215 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2216 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2217 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2218 bi = sh->dev[i].toread; 2219 sh->dev[i].toread = NULL; 2220 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2221 wake_up(&conf->wait_for_overlap); 2222 if (bi) s->to_read--; 2223 while (bi && bi->bi_sector < 2224 sh->dev[i].sector + STRIPE_SECTORS) { 2225 struct bio *nextbi = 2226 r5_next_bio(bi, sh->dev[i].sector); 2227 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2228 if (!raid5_dec_bi_phys_segments(bi)) { 2229 bi->bi_next = *return_bi; 2230 *return_bi = bi; 2231 } 2232 bi = nextbi; 2233 } 2234 } 2235 spin_unlock_irq(&conf->device_lock); 2236 if (bitmap_end) 2237 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2238 STRIPE_SECTORS, 0, 0); 2239 } 2240 2241 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2242 if (atomic_dec_and_test(&conf->pending_full_writes)) 2243 md_wakeup_thread(conf->mddev->thread); 2244} 2245 2246/* fetch_block5 - checks the given member device to see if its data needs 2247 * to be read or computed to satisfy a request. 
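 * (A typical trigger, for illustration: a partial overwrite of a block,
 * i.e. towrite set but not R5_OVERWRITE, which forces the old contents
 * to be read or computed before the parity can be updated.)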
2248 * 2249 * Returns 1 when no more member devices need to be checked, otherwise returns 2250 * 0 to tell the loop in handle_stripe_fill5 to continue 2251 */ 2252static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 2253 int disk_idx, int disks) 2254{ 2255 struct r5dev *dev = &sh->dev[disk_idx]; 2256 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 2257 2258 /* is the data in this block needed, and can we get it? */ 2259 if (!test_bit(R5_LOCKED, &dev->flags) && 2260 !test_bit(R5_UPTODATE, &dev->flags) && 2261 (dev->toread || 2262 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2263 s->syncing || s->expanding || 2264 (s->failed && 2265 (failed_dev->toread || 2266 (failed_dev->towrite && 2267 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 2268 /* We would like to get this block, possibly by computing it, 2269 * otherwise read it if the backing disk is insync 2270 */ 2271 if ((s->uptodate == disks - 1) && 2272 (s->failed && disk_idx == s->failed_num)) { 2273 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2274 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2275 set_bit(R5_Wantcompute, &dev->flags); 2276 sh->ops.target = disk_idx; 2277 sh->ops.target2 = -1; 2278 s->req_compute = 1; 2279 /* Careful: from this point on 'uptodate' is in the eye 2280 * of raid_run_ops which services 'compute' operations 2281 * before writes. R5_Wantcompute flags a block that will 2282 * be R5_UPTODATE by the time it is needed for a 2283 * subsequent operation. 2284 */ 2285 s->uptodate++; 2286 return 1; /* uptodate + compute == disks */ 2287 } else if (test_bit(R5_Insync, &dev->flags)) { 2288 set_bit(R5_LOCKED, &dev->flags); 2289 set_bit(R5_Wantread, &dev->flags); 2290 s->locked++; 2291 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2292 s->syncing); 2293 } 2294 } 2295 2296 return 0; 2297} 2298 2299/** 2300 * handle_stripe_fill5 - read or compute data to satisfy pending requests. 2301 */ 2302static void handle_stripe_fill5(struct stripe_head *sh, 2303 struct stripe_head_state *s, int disks) 2304{ 2305 int i; 2306 2307 /* look for blocks to read/compute, skip this if a compute 2308 * is already in flight, or if the stripe contents are in the 2309 * midst of changing due to a write 2310 */ 2311 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2312 !sh->reconstruct_state) 2313 for (i = disks; i--; ) 2314 if (fetch_block5(sh, s, i, disks)) 2315 break; 2316 set_bit(STRIPE_HANDLE, &sh->state); 2317} 2318 2319/* fetch_block6 - checks the given member device to see if its data needs 2320 * to be read or computed to satisfy a request. 
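 * Unlike the raid5 variant, this can schedule a two-target compute
 * (ops.target plus ops.target2) once two blocks are missing and the
 * remaining disks - 2 blocks are uptodate.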
2321 *
2322 * Returns 1 when no more member devices need to be checked, otherwise returns
2323 * 0 to tell the loop in handle_stripe_fill6 to continue
2324 */
2325 static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2326 struct r6_state *r6s, int disk_idx, int disks)
2327 {
2328 struct r5dev *dev = &sh->dev[disk_idx];
2329 struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
2330 &sh->dev[r6s->failed_num[1]] };
2331
2332 if (!test_bit(R5_LOCKED, &dev->flags) &&
2333 !test_bit(R5_UPTODATE, &dev->flags) &&
2334 (dev->toread ||
2335 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2336 s->syncing || s->expanding ||
2337 (s->failed >= 1 &&
2338 (fdev[0]->toread || s->to_write)) ||
2339 (s->failed >= 2 &&
2340 (fdev[1]->toread || s->to_write)))) {
2341 /* we would like to get this block, possibly by computing it,
2342 * otherwise read it if the backing disk is insync
2343 */
2344 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2345 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2346 if ((s->uptodate == disks - 1) &&
2347 (s->failed && (disk_idx == r6s->failed_num[0] ||
2348 disk_idx == r6s->failed_num[1]))) {
2349 /* a disk has failed and we've been asked to fetch its data,
2350 * so compute it
2351 */
2352 pr_debug("Computing stripe %llu block %d\n",
2353 (unsigned long long)sh->sector, disk_idx);
2354 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2355 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2356 set_bit(R5_Wantcompute, &dev->flags);
2357 sh->ops.target = disk_idx;
2358 sh->ops.target2 = -1; /* no 2nd target */
2359 s->req_compute = 1;
2360 s->uptodate++;
2361 return 1;
2362 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2363 /* Computing 2-failure is *very* expensive; only
2364 * do it if failed >= 2
2365 */
2366 int other;
2367 for (other = disks; other--; ) {
2368 if (other == disk_idx)
2369 continue;
2370 if (!test_bit(R5_UPTODATE,
2371 &sh->dev[other].flags))
2372 break;
2373 }
2374 BUG_ON(other < 0);
2375 pr_debug("Computing stripe %llu blocks %d,%d\n",
2376 (unsigned long long)sh->sector,
2377 disk_idx, other);
2378 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2379 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2380 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2381 set_bit(R5_Wantcompute, &sh->dev[other].flags);
2382 sh->ops.target = disk_idx;
2383 sh->ops.target2 = other;
2384 s->uptodate += 2;
2385 s->req_compute = 1;
2386 return 1;
2387 } else if (test_bit(R5_Insync, &dev->flags)) {
2388 set_bit(R5_LOCKED, &dev->flags);
2389 set_bit(R5_Wantread, &dev->flags);
2390 s->locked++;
2391 pr_debug("Reading block %d (sync=%d)\n",
2392 disk_idx, s->syncing);
2393 }
2394 }
2395
2396 return 0;
2397 }
2398
2399 /**
2400 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2401 */
2402 static void handle_stripe_fill6(struct stripe_head *sh,
2403 struct stripe_head_state *s, struct r6_state *r6s,
2404 int disks)
2405 {
2406 int i;
2407
2408 /* look for blocks to read/compute, skip this if a compute
2409 * is already in flight, or if the stripe contents are in the
2410 * midst of changing due to a write
2411 */
2412 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2413 !sh->reconstruct_state)
2414 for (i = disks; i--; )
2415 if (fetch_block6(sh, s, r6s, i, disks))
2416 break;
2417 set_bit(STRIPE_HANDLE, &sh->state);
2418 }
2419
2420
2421 /* handle_stripe_clean_event
2422 * any written block on an uptodate or failed drive can be returned.
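 * (Worked case: after a full-stripe write completes, every data block is
 * UPTODATE and no longer LOCKED, so the whole dev->written chain can be
 * handed back through return_bi.)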
2423 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2424 * never LOCKED, so we don't need to test 'failed' directly. 2425 */ 2426static void handle_stripe_clean_event(raid5_conf_t *conf, 2427 struct stripe_head *sh, int disks, struct bio **return_bi) 2428{ 2429 int i; 2430 struct r5dev *dev; 2431 2432 for (i = disks; i--; ) 2433 if (sh->dev[i].written) { 2434 dev = &sh->dev[i]; 2435 if (!test_bit(R5_LOCKED, &dev->flags) && 2436 test_bit(R5_UPTODATE, &dev->flags)) { 2437 /* We can return any write requests */ 2438 struct bio *wbi, *wbi2; 2439 int bitmap_end = 0; 2440 pr_debug("Return write for disc %d\n", i); 2441 spin_lock_irq(&conf->device_lock); 2442 wbi = dev->written; 2443 dev->written = NULL; 2444 while (wbi && wbi->bi_sector < 2445 dev->sector + STRIPE_SECTORS) { 2446 wbi2 = r5_next_bio(wbi, dev->sector); 2447 if (!raid5_dec_bi_phys_segments(wbi)) { 2448 md_write_end(conf->mddev); 2449 wbi->bi_next = *return_bi; 2450 *return_bi = wbi; 2451 } 2452 wbi = wbi2; 2453 } 2454 if (dev->towrite == NULL) 2455 bitmap_end = 1; 2456 spin_unlock_irq(&conf->device_lock); 2457 if (bitmap_end) 2458 bitmap_endwrite(conf->mddev->bitmap, 2459 sh->sector, 2460 STRIPE_SECTORS, 2461 !test_bit(STRIPE_DEGRADED, &sh->state), 2462 0); 2463 } 2464 } 2465 2466 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2467 if (atomic_dec_and_test(&conf->pending_full_writes)) 2468 md_wakeup_thread(conf->mddev->thread); 2469} 2470 2471static void handle_stripe_dirtying5(raid5_conf_t *conf, 2472 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2473{ 2474 int rmw = 0, rcw = 0, i; 2475 for (i = disks; i--; ) { 2476 /* would I have to read this buffer for read_modify_write */ 2477 struct r5dev *dev = &sh->dev[i]; 2478 if ((dev->towrite || i == sh->pd_idx) && 2479 !test_bit(R5_LOCKED, &dev->flags) && 2480 !(test_bit(R5_UPTODATE, &dev->flags) || 2481 test_bit(R5_Wantcompute, &dev->flags))) { 2482 if (test_bit(R5_Insync, &dev->flags)) 2483 rmw++; 2484 else 2485 rmw += 2*disks; /* cannot read it */ 2486 } 2487 /* Would I have to read this buffer for reconstruct_write */ 2488 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2489 !test_bit(R5_LOCKED, &dev->flags) && 2490 !(test_bit(R5_UPTODATE, &dev->flags) || 2491 test_bit(R5_Wantcompute, &dev->flags))) { 2492 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2493 else 2494 rcw += 2*disks; 2495 } 2496 } 2497 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2498 (unsigned long long)sh->sector, rmw, rcw); 2499 set_bit(STRIPE_HANDLE, &sh->state); 2500 if (rmw < rcw && rmw > 0) 2501 /* prefer read-modify-write, but need to get some data */ 2502 for (i = disks; i--; ) { 2503 struct r5dev *dev = &sh->dev[i]; 2504 if ((dev->towrite || i == sh->pd_idx) && 2505 !test_bit(R5_LOCKED, &dev->flags) && 2506 !(test_bit(R5_UPTODATE, &dev->flags) || 2507 test_bit(R5_Wantcompute, &dev->flags)) && 2508 test_bit(R5_Insync, &dev->flags)) { 2509 if ( 2510 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2511 pr_debug("Read_old block " 2512 "%d for r-m-w\n", i); 2513 set_bit(R5_LOCKED, &dev->flags); 2514 set_bit(R5_Wantread, &dev->flags); 2515 s->locked++; 2516 } else { 2517 set_bit(STRIPE_DELAYED, &sh->state); 2518 set_bit(STRIPE_HANDLE, &sh->state); 2519 } 2520 } 2521 } 2522 if (rcw <= rmw && rcw > 0) 2523 /* want reconstruct write, but need to get some data */ 2524 for (i = disks; i--; ) { 2525 struct r5dev *dev = &sh->dev[i]; 2526 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2527 i != sh->pd_idx && 2528 !test_bit(R5_LOCKED, &dev->flags) && 2529 
!(test_bit(R5_UPTODATE, &dev->flags) ||
2530 test_bit(R5_Wantcompute, &dev->flags)) &&
2531 test_bit(R5_Insync, &dev->flags)) {
2532 if (
2533 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2534 pr_debug("Read_old block "
2535 "%d for Reconstruct\n", i);
2536 set_bit(R5_LOCKED, &dev->flags);
2537 set_bit(R5_Wantread, &dev->flags);
2538 s->locked++;
2539 } else {
2540 set_bit(STRIPE_DELAYED, &sh->state);
2541 set_bit(STRIPE_HANDLE, &sh->state);
2542 }
2543 }
2544 }
2545 /* now if nothing is locked, and if we have enough data,
2546 * we can start a write request
2547 */
2548 /* since handle_stripe can be called at any time we need to handle the
2549 * case where a compute block operation has been submitted and then a
2550 * subsequent call wants to start a write request. raid_run_ops only
2551 * handles the case where compute block and reconstruct are requested
2552 * simultaneously. If this is not the case then new writes need to be
2553 * held off until the compute completes.
2554 */
2555 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2556 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2557 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2558 schedule_reconstruction(sh, s, rcw == 0, 0);
2559 }
2560
2561 static void handle_stripe_dirtying6(raid5_conf_t *conf,
2562 struct stripe_head *sh, struct stripe_head_state *s,
2563 struct r6_state *r6s, int disks)
2564 {
2565 int rcw = 0, pd_idx = sh->pd_idx, i;
2566 int qd_idx = sh->qd_idx;
2567
2568 set_bit(STRIPE_HANDLE, &sh->state);
2569 for (i = disks; i--; ) {
2570 struct r5dev *dev = &sh->dev[i];
2571 /* check if we don't have enough data */
2572 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2573 i != pd_idx && i != qd_idx &&
2574 !test_bit(R5_LOCKED, &dev->flags) &&
2575 !(test_bit(R5_UPTODATE, &dev->flags) ||
2576 test_bit(R5_Wantcompute, &dev->flags))) {
2577 rcw++;
2578 if (!test_bit(R5_Insync, &dev->flags))
2579 continue; /* it's a failed drive */
2580
2581 if (
2582 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2583 pr_debug("Read_old stripe %llu "
2584 "block %d for Reconstruct\n",
2585 (unsigned long long)sh->sector, i);
2586 set_bit(R5_LOCKED, &dev->flags);
2587 set_bit(R5_Wantread, &dev->flags);
2588 s->locked++;
2589 } else {
2590 pr_debug("Request delayed stripe %llu "
2591 "block %d for Reconstruct\n",
2592 (unsigned long long)sh->sector, i);
2593 set_bit(STRIPE_DELAYED, &sh->state);
2594 set_bit(STRIPE_HANDLE, &sh->state);
2595 }
2596 }
2597 }
2598 /* now if nothing is locked, and if we have enough data, we can start a
2599 * write request
2600 */
2601 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2602 s->locked == 0 && rcw == 0 &&
2603 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2604 schedule_reconstruction(sh, s, 1, 0);
2605 }
2606 }
2607
2608 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2609 struct stripe_head_state *s, int disks)
2610 {
2611 struct r5dev *dev = NULL;
2612
2613 set_bit(STRIPE_HANDLE, &sh->state);
2614
2615 switch (sh->check_state) {
2616 case check_state_idle:
2617 /* start a new check operation if there are no failures */
2618 if (s->failed == 0) {
2619 BUG_ON(s->uptodate != disks);
2620 sh->check_state = check_state_run;
2621 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2622 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2623 s->uptodate--;
2624 break;
2625 }
2626 dev = &sh->dev[s->failed_num];
2627 /* fall through */
2628 case check_state_compute_result:
2629 sh->check_state = check_state_idle;
2630 if (!dev)
2631 dev = &sh->dev[sh->pd_idx];
2632
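/* From here the compute_result path and the idle-with-a-failure
 * fall-through share one writeback: 'dev' is either the failed device or
 * the parity block, and it is locked and flagged R5_Wantwrite below.
 */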
2633 /* check that a write has not made the stripe insync */ 2634 if (test_bit(STRIPE_INSYNC, &sh->state)) 2635 break; 2636 2637 /* either failed parity check, or recovery is happening */ 2638 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2639 BUG_ON(s->uptodate != disks); 2640 2641 set_bit(R5_LOCKED, &dev->flags); 2642 s->locked++; 2643 set_bit(R5_Wantwrite, &dev->flags); 2644 2645 clear_bit(STRIPE_DEGRADED, &sh->state); 2646 set_bit(STRIPE_INSYNC, &sh->state); 2647 break; 2648 case check_state_run: 2649 break; /* we will be called again upon completion */ 2650 case check_state_check_result: 2651 sh->check_state = check_state_idle; 2652 2653 /* if a failure occurred during the check operation, leave 2654 * STRIPE_INSYNC not set and let the stripe be handled again 2655 */ 2656 if (s->failed) 2657 break; 2658 2659 /* handle a successful check operation, if parity is correct 2660 * we are done. Otherwise update the mismatch count and repair 2661 * parity if !MD_RECOVERY_CHECK 2662 */ 2663 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 2664 /* parity is correct (on disc, 2665 * not in buffer any more) 2666 */ 2667 set_bit(STRIPE_INSYNC, &sh->state); 2668 else { 2669 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2670 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2671 /* don't try to repair!! */ 2672 set_bit(STRIPE_INSYNC, &sh->state); 2673 else { 2674 sh->check_state = check_state_compute_run; 2675 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2676 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2677 set_bit(R5_Wantcompute, 2678 &sh->dev[sh->pd_idx].flags); 2679 sh->ops.target = sh->pd_idx; 2680 sh->ops.target2 = -1; 2681 s->uptodate++; 2682 } 2683 } 2684 break; 2685 case check_state_compute_run: 2686 break; 2687 default: 2688 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2689 __func__, sh->check_state, 2690 (unsigned long long) sh->sector); 2691 BUG(); 2692 } 2693} 2694 2695 2696static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2697 struct stripe_head_state *s, 2698 struct r6_state *r6s, int disks) 2699{ 2700 int pd_idx = sh->pd_idx; 2701 int qd_idx = sh->qd_idx; 2702 struct r5dev *dev; 2703 2704 set_bit(STRIPE_HANDLE, &sh->state); 2705 2706 BUG_ON(s->failed > 2); 2707 2708 /* Want to check and possibly repair P and Q. 2709 * However there could be one 'failed' device, in which 2710 * case we can only check one of them, possibly using the 2711 * other to generate missing data 2712 */ 2713 2714 switch (sh->check_state) { 2715 case check_state_idle: 2716 /* start a new check operation if there are < 2 failures */ 2717 if (s->failed == r6s->q_failed) { 2718 /* The only possible failed device holds Q, so it 2719 * makes sense to check P (If anything else were failed, 2720 * we would have used P to recreate it). 
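 * To spell out the three launch states chosen below: with no failures we
 * end up in check_state_run_pq (check both P and Q); with only Q failed,
 * check_state_run (check P alone); with one data device failed,
 * check_state_run_q (check Q alone).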
2721 */ 2722 sh->check_state = check_state_run; 2723 } 2724 if (!r6s->q_failed && s->failed < 2) { 2725 /* Q is not failed, and we didn't use it to generate 2726 * anything, so it makes sense to check it 2727 */ 2728 if (sh->check_state == check_state_run) 2729 sh->check_state = check_state_run_pq; 2730 else 2731 sh->check_state = check_state_run_q; 2732 } 2733 2734 /* discard potentially stale zero_sum_result */ 2735 sh->ops.zero_sum_result = 0; 2736 2737 if (sh->check_state == check_state_run) { 2738 /* async_xor_zero_sum destroys the contents of P */ 2739 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2740 s->uptodate--; 2741 } 2742 if (sh->check_state >= check_state_run && 2743 sh->check_state <= check_state_run_pq) { 2744 /* async_syndrome_zero_sum preserves P and Q, so 2745 * no need to mark them !uptodate here 2746 */ 2747 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2748 break; 2749 } 2750 2751 /* we have 2-disk failure */ 2752 BUG_ON(s->failed != 2); 2753 /* fall through */ 2754 case check_state_compute_result: 2755 sh->check_state = check_state_idle; 2756 2757 /* check that a write has not made the stripe insync */ 2758 if (test_bit(STRIPE_INSYNC, &sh->state)) 2759 break; 2760 2761 /* now write out any block on a failed drive, 2762 * or P or Q if they were recomputed 2763 */ 2764 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 2765 if (s->failed == 2) { 2766 dev = &sh->dev[r6s->failed_num[1]]; 2767 s->locked++; 2768 set_bit(R5_LOCKED, &dev->flags); 2769 set_bit(R5_Wantwrite, &dev->flags); 2770 } 2771 if (s->failed >= 1) { 2772 dev = &sh->dev[r6s->failed_num[0]]; 2773 s->locked++; 2774 set_bit(R5_LOCKED, &dev->flags); 2775 set_bit(R5_Wantwrite, &dev->flags); 2776 } 2777 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 2778 dev = &sh->dev[pd_idx]; 2779 s->locked++; 2780 set_bit(R5_LOCKED, &dev->flags); 2781 set_bit(R5_Wantwrite, &dev->flags); 2782 } 2783 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 2784 dev = &sh->dev[qd_idx]; 2785 s->locked++; 2786 set_bit(R5_LOCKED, &dev->flags); 2787 set_bit(R5_Wantwrite, &dev->flags); 2788 } 2789 clear_bit(STRIPE_DEGRADED, &sh->state); 2790 2791 set_bit(STRIPE_INSYNC, &sh->state); 2792 break; 2793 case check_state_run: 2794 case check_state_run_q: 2795 case check_state_run_pq: 2796 break; /* we will be called again upon completion */ 2797 case check_state_check_result: 2798 sh->check_state = check_state_idle; 2799 2800 /* handle a successful check operation, if parity is correct 2801 * we are done. Otherwise update the mismatch count and repair 2802 * parity if !MD_RECOVERY_CHECK 2803 */ 2804 if (sh->ops.zero_sum_result == 0) { 2805 /* both parities are correct */ 2806 if (!s->failed) 2807 set_bit(STRIPE_INSYNC, &sh->state); 2808 else { 2809 /* in contrast to the raid5 case we can validate 2810 * parity, but still have a failure to write 2811 * back 2812 */ 2813 sh->check_state = check_state_compute_result; 2814 /* Returning at this point means that we may go 2815 * off and bring p and/or q uptodate again so 2816 * we make sure to check zero_sum_result again 2817 * to verify if p or q need writeback 2818 */ 2819 } 2820 } else { 2821 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2822 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2823 /* don't try to repair!! 
*/
2824 set_bit(STRIPE_INSYNC, &sh->state);
2825 else {
2826 int *target = &sh->ops.target;
2827
2828 sh->ops.target = -1;
2829 sh->ops.target2 = -1;
2830 sh->check_state = check_state_compute_run;
2831 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2832 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2833 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2834 set_bit(R5_Wantcompute,
2835 &sh->dev[pd_idx].flags);
2836 *target = pd_idx;
2837 target = &sh->ops.target2;
2838 s->uptodate++;
2839 }
2840 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2841 set_bit(R5_Wantcompute,
2842 &sh->dev[qd_idx].flags);
2843 *target = qd_idx;
2844 s->uptodate++;
2845 }
2846 }
2847 }
2848 break;
2849 case check_state_compute_run:
2850 break;
2851 default:
2852 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2853 __func__, sh->check_state,
2854 (unsigned long long) sh->sector);
2855 BUG();
2856 }
2857 }
2858
2859 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2860 struct r6_state *r6s)
2861 {
2862 int i;
2863
2864 /* We have read all the blocks in this stripe and now we need to
2865 * copy some of them into a target stripe for expand.
2866 */
2867 struct dma_async_tx_descriptor *tx = NULL;
2868 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2869 for (i = 0; i < sh->disks; i++)
2870 if (i != sh->pd_idx && i != sh->qd_idx) {
2871 int dd_idx, j;
2872 struct stripe_head *sh2;
2873 struct async_submit_ctl submit;
2874
2875 sector_t bn = compute_blocknr(sh, i, 1);
2876 sector_t s = raid5_compute_sector(conf, bn, 0,
2877 &dd_idx, NULL);
2878 sh2 = get_active_stripe(conf, s, 0, 1, 1);
2879 if (sh2 == NULL)
2880 /* so far only the early blocks of this stripe
2881 * have been requested. When later blocks
2882 * get requested, we will try again
2883 */
2884 continue;
2885 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2886 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2887 /* must have already done this block */
2888 release_stripe(sh2);
2889 continue;
2890 }
2891
2892 /* place all the copies on one channel */
2893 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2894 tx = async_memcpy(sh2->dev[dd_idx].page,
2895 sh->dev[i].page, 0, 0, STRIPE_SIZE,
2896 &submit);
2897
2898 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2899 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2900 for (j = 0; j < conf->raid_disks; j++)
2901 if (j != sh2->pd_idx &&
2902 (!r6s || j != sh2->qd_idx) &&
2903 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2904 break;
2905 if (j == conf->raid_disks) {
2906 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2907 set_bit(STRIPE_HANDLE, &sh2->state);
2908 }
2909 release_stripe(sh2);
2910
2911 }
2912 /* done submitting copies, wait for them to complete */
2913 if (tx) {
2914 async_tx_ack(tx);
2915 dma_wait_for_async_tx(tx);
2916 }
2917 }
2918
2919
2920 /*
2921 * handle_stripe - do things to a stripe.
2922 *
2923 * We lock the stripe and then examine the state of various bits
2924 * to see what needs to be done.
2925 * Possible results:
2926 * return some read requests which now have data
2927 * return some write requests which are safely on disc
2928 * schedule a read on some buffers
2929 * schedule a write of some buffers
2930 * return confirmation of parity correctness
2931 *
2932 * buffers are taken off read_list or write_list, and bh_cache buffers
2933 * get BH_Lock set before the stripe lock is released.
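 * (handle_stripe() itself only dispatches on conf->level: level 6 arrays
 * take the handle_stripe6() path, everything else handle_stripe5().)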
2934 * 2935 */ 2936 2937static void handle_stripe5(struct stripe_head *sh) 2938{ 2939 raid5_conf_t *conf = sh->raid_conf; 2940 int disks = sh->disks, i; 2941 struct bio *return_bi = NULL; 2942 struct stripe_head_state s; 2943 struct r5dev *dev; 2944 mdk_rdev_t *blocked_rdev = NULL; 2945 int prexor; 2946 int dec_preread_active = 0; 2947 2948 memset(&s, 0, sizeof(s)); 2949 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2950 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2951 atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2952 sh->reconstruct_state); 2953 2954 spin_lock(&sh->lock); 2955 clear_bit(STRIPE_HANDLE, &sh->state); 2956 clear_bit(STRIPE_DELAYED, &sh->state); 2957 2958 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2959 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2960 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2961 2962 /* Now to look around and see what can be done */ 2963 rcu_read_lock(); 2964 for (i=disks; i--; ) { 2965 mdk_rdev_t *rdev; 2966 2967 dev = &sh->dev[i]; 2968 clear_bit(R5_Insync, &dev->flags); 2969 2970 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2971 "written %p\n", i, dev->flags, dev->toread, dev->read, 2972 dev->towrite, dev->written); 2973 2974 /* maybe we can request a biofill operation 2975 * 2976 * new wantfill requests are only permitted while 2977 * ops_complete_biofill is guaranteed to be inactive 2978 */ 2979 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2980 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2981 set_bit(R5_Wantfill, &dev->flags); 2982 2983 /* now count some things */ 2984 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2985 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2986 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2987 2988 if (test_bit(R5_Wantfill, &dev->flags)) 2989 s.to_fill++; 2990 else if (dev->toread) 2991 s.to_read++; 2992 if (dev->towrite) { 2993 s.to_write++; 2994 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2995 s.non_overwrite++; 2996 } 2997 if (dev->written) 2998 s.written++; 2999 rdev = rcu_dereference(conf->disks[i].rdev); 3000 if (blocked_rdev == NULL && 3001 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3002 blocked_rdev = rdev; 3003 atomic_inc(&rdev->nr_pending); 3004 } 3005 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3006 /* The ReadError flag will just be confusing now */ 3007 clear_bit(R5_ReadError, &dev->flags); 3008 clear_bit(R5_ReWrite, &dev->flags); 3009 } 3010 if (!rdev || !test_bit(In_sync, &rdev->flags) 3011 || test_bit(R5_ReadError, &dev->flags)) { 3012 s.failed++; 3013 s.failed_num = i; 3014 } else 3015 set_bit(R5_Insync, &dev->flags); 3016 } 3017 rcu_read_unlock(); 3018 3019 if (unlikely(blocked_rdev)) { 3020 if (s.syncing || s.expanding || s.expanded || 3021 s.to_write || s.written) { 3022 set_bit(STRIPE_HANDLE, &sh->state); 3023 goto unlock; 3024 } 3025 /* There is nothing for the blocked_rdev to block */ 3026 rdev_dec_pending(blocked_rdev, conf->mddev); 3027 blocked_rdev = NULL; 3028 } 3029 3030 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3031 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 3032 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 3033 } 3034 3035 pr_debug("locked=%d uptodate=%d to_read=%d" 3036 " to_write=%d failed=%d failed_num=%d\n", 3037 s.locked, s.uptodate, s.to_read, s.to_write, 3038 s.failed, s.failed_num); 3039 /* check if the array has lost two devices and, if so, some requests might 3040 * need to be failed 3041 */ 3042 if (s.failed > 1 && 
s.to_read+s.to_write+s.written) 3043 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3044 if (s.failed > 1 && s.syncing) { 3045 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3046 clear_bit(STRIPE_SYNCING, &sh->state); 3047 s.syncing = 0; 3048 } 3049 3050 /* might be able to return some write requests if the parity block 3051 * is safe, or on a failed drive 3052 */ 3053 dev = &sh->dev[sh->pd_idx]; 3054 if ( s.written && 3055 ((test_bit(R5_Insync, &dev->flags) && 3056 !test_bit(R5_LOCKED, &dev->flags) && 3057 test_bit(R5_UPTODATE, &dev->flags)) || 3058 (s.failed == 1 && s.failed_num == sh->pd_idx))) 3059 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3060 3061 /* Now we might consider reading some blocks, either to check/generate 3062 * parity, or to satisfy requests 3063 * or to load a block that is being partially written. 3064 */ 3065 if (s.to_read || s.non_overwrite || 3066 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 3067 handle_stripe_fill5(sh, &s, disks); 3068 3069 /* Now we check to see if any write operations have recently 3070 * completed 3071 */ 3072 prexor = 0; 3073 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 3074 prexor = 1; 3075 if (sh->reconstruct_state == reconstruct_state_drain_result || 3076 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 3077 sh->reconstruct_state = reconstruct_state_idle; 3078 3079 /* All the 'written' buffers and the parity block are ready to 3080 * be written back to disk 3081 */ 3082 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3083 for (i = disks; i--; ) { 3084 dev = &sh->dev[i]; 3085 if (test_bit(R5_LOCKED, &dev->flags) && 3086 (i == sh->pd_idx || dev->written)) { 3087 pr_debug("Writing block %d\n", i); 3088 set_bit(R5_Wantwrite, &dev->flags); 3089 if (prexor) 3090 continue; 3091 if (!test_bit(R5_Insync, &dev->flags) || 3092 (i == sh->pd_idx && s.failed == 0)) 3093 set_bit(STRIPE_INSYNC, &sh->state); 3094 } 3095 } 3096 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3097 dec_preread_active = 1; 3098 } 3099 3100 /* Now to consider new write requests and what else, if anything 3101 * should be read. We do not handle new writes when: 3102 * 1/ A 'write' operation (copy+xor) is already in flight. 3103 * 2/ A 'check' operation is in flight, as it may clobber the parity 3104 * block. 3105 */ 3106 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3107 handle_stripe_dirtying5(conf, sh, &s, disks); 3108 3109 /* maybe we need to check and possibly fix the parity for this stripe 3110 * Any reads will already have been scheduled, so we just see if enough 3111 * data is available. The parity check is held off while parity 3112 * dependent operations are in flight. 
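 * (Concretely, the condition below only starts a check once s.locked is
 * zero and no compute is in flight.)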
3113 */ 3114 if (sh->check_state || 3115 (s.syncing && s.locked == 0 && 3116 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3117 !test_bit(STRIPE_INSYNC, &sh->state))) 3118 handle_parity_checks5(conf, sh, &s, disks); 3119 3120 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3121 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3122 clear_bit(STRIPE_SYNCING, &sh->state); 3123 } 3124 3125 /* If the failed drive is just a ReadError, then we might need to progress 3126 * the repair/check process 3127 */ 3128 if (s.failed == 1 && !conf->mddev->ro && 3129 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 3130 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 3131 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 3132 ) { 3133 dev = &sh->dev[s.failed_num]; 3134 if (!test_bit(R5_ReWrite, &dev->flags)) { 3135 set_bit(R5_Wantwrite, &dev->flags); 3136 set_bit(R5_ReWrite, &dev->flags); 3137 set_bit(R5_LOCKED, &dev->flags); 3138 s.locked++; 3139 } else { 3140 /* let's read it back */ 3141 set_bit(R5_Wantread, &dev->flags); 3142 set_bit(R5_LOCKED, &dev->flags); 3143 s.locked++; 3144 } 3145 } 3146 3147 /* Finish reconstruct operations initiated by the expansion process */ 3148 if (sh->reconstruct_state == reconstruct_state_result) { 3149 struct stripe_head *sh2 3150 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3151 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3152 /* sh cannot be written until sh2 has been read. 3153 * so arrange for sh to be delayed a little 3154 */ 3155 set_bit(STRIPE_DELAYED, &sh->state); 3156 set_bit(STRIPE_HANDLE, &sh->state); 3157 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3158 &sh2->state)) 3159 atomic_inc(&conf->preread_active_stripes); 3160 release_stripe(sh2); 3161 goto unlock; 3162 } 3163 if (sh2) 3164 release_stripe(sh2); 3165 3166 sh->reconstruct_state = reconstruct_state_idle; 3167 clear_bit(STRIPE_EXPANDING, &sh->state); 3168 for (i = conf->raid_disks; i--; ) { 3169 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3170 set_bit(R5_LOCKED, &sh->dev[i].flags); 3171 s.locked++; 3172 } 3173 } 3174 3175 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3176 !sh->reconstruct_state) { 3177 /* Need to write out all blocks after computing parity */ 3178 sh->disks = conf->raid_disks; 3179 stripe_set_idx(sh->sector, conf, 0, sh); 3180 schedule_reconstruction(sh, &s, 1, 1); 3181 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 3182 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3183 atomic_dec(&conf->reshape_stripes); 3184 wake_up(&conf->wait_for_overlap); 3185 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3186 } 3187 3188 if (s.expanding && s.locked == 0 && 3189 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3190 handle_stripe_expansion(conf, sh, NULL); 3191 3192 unlock: 3193 spin_unlock(&sh->lock); 3194 3195 /* wait for this device to become unblocked */ 3196 if (unlikely(blocked_rdev)) 3197 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3198 3199 if (s.ops_request) 3200 raid_run_ops(sh, s.ops_request); 3201 3202 ops_run_io(sh, &s); 3203 3204 if (dec_preread_active) { 3205 /* We delay this until after ops_run_io so that if make_request 3206 * is waiting on a barrier, it won't continue until the writes 3207 * have actually been submitted. 
3208 */
3209 atomic_dec(&conf->preread_active_stripes);
3210 if (atomic_read(&conf->preread_active_stripes) <
3211 IO_THRESHOLD)
3212 md_wakeup_thread(conf->mddev->thread);
3213 }
3214 return_io(return_bi);
3215 }
3216
3217 static void handle_stripe6(struct stripe_head *sh)
3218 {
3219 raid5_conf_t *conf = sh->raid_conf;
3220 int disks = sh->disks;
3221 struct bio *return_bi = NULL;
3222 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
3223 struct stripe_head_state s;
3224 struct r6_state r6s;
3225 struct r5dev *dev, *pdev, *qdev;
3226 mdk_rdev_t *blocked_rdev = NULL;
3227 int dec_preread_active = 0;
3228
3229 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3230 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3231 (unsigned long long)sh->sector, sh->state,
3232 atomic_read(&sh->count), pd_idx, qd_idx,
3233 sh->check_state, sh->reconstruct_state);
3234 memset(&s, 0, sizeof(s));
3235
3236 spin_lock(&sh->lock);
3237 clear_bit(STRIPE_HANDLE, &sh->state);
3238 clear_bit(STRIPE_DELAYED, &sh->state);
3239
3240 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3241 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3242 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3243 /* Now to look around and see what can be done */
3244
3245 rcu_read_lock();
3246 for (i=disks; i--; ) {
3247 mdk_rdev_t *rdev;
3248 dev = &sh->dev[i];
3249 clear_bit(R5_Insync, &dev->flags);
3250
3251 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3252 i, dev->flags, dev->toread, dev->towrite, dev->written);
3253 /* maybe we can reply to a read
3254 *
3255 * new wantfill requests are only permitted while
3256 * ops_complete_biofill is guaranteed to be inactive
3257 */
3258 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3259 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3260 set_bit(R5_Wantfill, &dev->flags);
3261
3262 /* now count some things */
3263 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3264 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
3265 if (test_bit(R5_Wantcompute, &dev->flags)) {
3266 s.compute++;
3267 BUG_ON(s.compute > 2);
3268 }
3269
3270 if (test_bit(R5_Wantfill, &dev->flags)) {
3271 s.to_fill++;
3272 } else if (dev->toread)
3273 s.to_read++;
3274 if (dev->towrite) {
3275 s.to_write++;
3276 if (!test_bit(R5_OVERWRITE, &dev->flags))
3277 s.non_overwrite++;
3278 }
3279 if (dev->written)
3280 s.written++;
3281 rdev = rcu_dereference(conf->disks[i].rdev);
3282 if (blocked_rdev == NULL &&
3283 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3284 blocked_rdev = rdev;
3285 atomic_inc(&rdev->nr_pending);
3286 }
3287 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3288 /* The ReadError flag will just be confusing now */
3289 clear_bit(R5_ReadError, &dev->flags);
3290 clear_bit(R5_ReWrite, &dev->flags);
3291 }
3292 if (!rdev || !test_bit(In_sync, &rdev->flags)
3293 || test_bit(R5_ReadError, &dev->flags)) {
3294 if (s.failed < 2)
3295 r6s.failed_num[s.failed] = i;
3296 s.failed++;
3297 } else
3298 set_bit(R5_Insync, &dev->flags);
3299 }
3300 rcu_read_unlock();
3301
3302 if (unlikely(blocked_rdev)) {
3303 if (s.syncing || s.expanding || s.expanded ||
3304 s.to_write || s.written) {
3305 set_bit(STRIPE_HANDLE, &sh->state);
3306 goto unlock;
3307 }
3308 /* There is nothing for the blocked_rdev to block */
3309 rdev_dec_pending(blocked_rdev, conf->mddev);
3310 blocked_rdev = NULL;
3311 }
3312
3313 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3314 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3315 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3316 }
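/* The failure accounting below differs from raid5 only in the threshold:
 * up to two failed devices are tolerated, so requests are failed
 * wholesale only once s.failed exceeds 2.
 */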
3317 3318 pr_debug("locked=%d uptodate=%d to_read=%d" 3319 " to_write=%d failed=%d failed_num=%d,%d\n", 3320 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3321 r6s.failed_num[0], r6s.failed_num[1]); 3322 /* check if the array has lost >2 devices and, if so, some requests 3323 * might need to be failed 3324 */ 3325 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3326 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3327 if (s.failed > 2 && s.syncing) { 3328 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3329 clear_bit(STRIPE_SYNCING, &sh->state); 3330 s.syncing = 0; 3331 } 3332 3333 /* 3334 * might be able to return some write requests if the parity blocks 3335 * are safe, or on a failed drive 3336 */ 3337 pdev = &sh->dev[pd_idx]; 3338 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3339 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3340 qdev = &sh->dev[qd_idx]; 3341 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3342 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3343 3344 if ( s.written && 3345 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3346 && !test_bit(R5_LOCKED, &pdev->flags) 3347 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3348 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3349 && !test_bit(R5_LOCKED, &qdev->flags) 3350 && test_bit(R5_UPTODATE, &qdev->flags))))) 3351 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3352 3353 /* Now we might consider reading some blocks, either to check/generate 3354 * parity, or to satisfy requests 3355 * or to load a block that is being partially written. 3356 */ 3357 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3358 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 3359 handle_stripe_fill6(sh, &s, &r6s, disks); 3360 3361 /* Now we check to see if any write operations have recently 3362 * completed 3363 */ 3364 if (sh->reconstruct_state == reconstruct_state_drain_result) { 3365 3366 sh->reconstruct_state = reconstruct_state_idle; 3367 /* All the 'written' buffers and the parity blocks are ready to 3368 * be written back to disk 3369 */ 3370 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3371 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags)); 3372 for (i = disks; i--; ) { 3373 dev = &sh->dev[i]; 3374 if (test_bit(R5_LOCKED, &dev->flags) && 3375 (i == sh->pd_idx || i == qd_idx || 3376 dev->written)) { 3377 pr_debug("Writing block %d\n", i); 3378 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3379 set_bit(R5_Wantwrite, &dev->flags); 3380 if (!test_bit(R5_Insync, &dev->flags) || 3381 ((i == sh->pd_idx || i == qd_idx) && 3382 s.failed == 0)) 3383 set_bit(STRIPE_INSYNC, &sh->state); 3384 } 3385 } 3386 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3387 dec_preread_active = 1; 3388 } 3389 3390 /* Now to consider new write requests and what else, if anything 3391 * should be read. We do not handle new writes when: 3392 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. 3393 * 2/ A 'check' operation is in flight, as it may clobber the parity 3394 * block. 3395 */ 3396 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3397 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3398 3399 /* maybe we need to check and possibly fix the parity for this stripe 3400 * Any reads will already have been scheduled, so we just see if enough 3401 * data is available. The parity check is held off while parity 3402 * dependent operations are in flight. 
3403 */ 3404 if (sh->check_state || 3405 (s.syncing && s.locked == 0 && 3406 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3407 !test_bit(STRIPE_INSYNC, &sh->state))) 3408 handle_parity_checks6(conf, sh, &s, &r6s, disks); 3409 3410 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3411 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3412 clear_bit(STRIPE_SYNCING, &sh->state); 3413 } 3414 3415 /* If the failed drives are just a ReadError, then we might need 3416 * to progress the repair/check process 3417 */ 3418 if (s.failed <= 2 && !conf->mddev->ro) 3419 for (i = 0; i < s.failed; i++) { 3420 dev = &sh->dev[r6s.failed_num[i]]; 3421 if (test_bit(R5_ReadError, &dev->flags) 3422 && !test_bit(R5_LOCKED, &dev->flags) 3423 && test_bit(R5_UPTODATE, &dev->flags) 3424 ) { 3425 if (!test_bit(R5_ReWrite, &dev->flags)) { 3426 set_bit(R5_Wantwrite, &dev->flags); 3427 set_bit(R5_ReWrite, &dev->flags); 3428 set_bit(R5_LOCKED, &dev->flags); 3429 s.locked++; 3430 } else { 3431 /* let's read it back */ 3432 set_bit(R5_Wantread, &dev->flags); 3433 set_bit(R5_LOCKED, &dev->flags); 3434 s.locked++; 3435 } 3436 } 3437 } 3438 3439 /* Finish reconstruct operations initiated by the expansion process */ 3440 if (sh->reconstruct_state == reconstruct_state_result) { 3441 sh->reconstruct_state = reconstruct_state_idle; 3442 clear_bit(STRIPE_EXPANDING, &sh->state); 3443 for (i = conf->raid_disks; i--; ) { 3444 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3445 set_bit(R5_LOCKED, &sh->dev[i].flags); 3446 s.locked++; 3447 } 3448 } 3449 3450 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3451 !sh->reconstruct_state) { 3452 struct stripe_head *sh2 3453 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3454 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3455 /* sh cannot be written until sh2 has been read. 3456 * so arrange for sh to be delayed a little 3457 */ 3458 set_bit(STRIPE_DELAYED, &sh->state); 3459 set_bit(STRIPE_HANDLE, &sh->state); 3460 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3461 &sh2->state)) 3462 atomic_inc(&conf->preread_active_stripes); 3463 release_stripe(sh2); 3464 goto unlock; 3465 } 3466 if (sh2) 3467 release_stripe(sh2); 3468 3469 /* Need to write out all blocks after computing P&Q */ 3470 sh->disks = conf->raid_disks; 3471 stripe_set_idx(sh->sector, conf, 0, sh); 3472 schedule_reconstruction(sh, &s, 1, 1); 3473 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 3474 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3475 atomic_dec(&conf->reshape_stripes); 3476 wake_up(&conf->wait_for_overlap); 3477 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3478 } 3479 3480 if (s.expanding && s.locked == 0 && 3481 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3482 handle_stripe_expansion(conf, sh, &r6s); 3483 3484 unlock: 3485 spin_unlock(&sh->lock); 3486 3487 /* wait for this device to become unblocked */ 3488 if (unlikely(blocked_rdev)) 3489 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3490 3491 if (s.ops_request) 3492 raid_run_ops(sh, s.ops_request); 3493 3494 ops_run_io(sh, &s); 3495 3496 3497 if (dec_preread_active) { 3498 /* We delay this until after ops_run_io so that if make_request 3499 * is waiting on a barrier, it won't continue until the writes 3500 * have actually been submitted. 
3501 */ 3502 atomic_dec(&conf->preread_active_stripes); 3503 if (atomic_read(&conf->preread_active_stripes) < 3504 IO_THRESHOLD) 3505 md_wakeup_thread(conf->mddev->thread); 3506 } 3507 3508 return_io(return_bi); 3509} 3510 3511static void handle_stripe(struct stripe_head *sh) 3512{ 3513 if (sh->raid_conf->level == 6) 3514 handle_stripe6(sh); 3515 else 3516 handle_stripe5(sh); 3517} 3518 3519static void raid5_activate_delayed(raid5_conf_t *conf) 3520{ 3521 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3522 while (!list_empty(&conf->delayed_list)) { 3523 struct list_head *l = conf->delayed_list.next; 3524 struct stripe_head *sh; 3525 sh = list_entry(l, struct stripe_head, lru); 3526 list_del_init(l); 3527 clear_bit(STRIPE_DELAYED, &sh->state); 3528 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3529 atomic_inc(&conf->preread_active_stripes); 3530 list_add_tail(&sh->lru, &conf->hold_list); 3531 } 3532 } else 3533 blk_plug_device(conf->mddev->queue); 3534} 3535 3536static void activate_bit_delay(raid5_conf_t *conf) 3537{ 3538 /* device_lock is held */ 3539 struct list_head head; 3540 list_add(&head, &conf->bitmap_list); 3541 list_del_init(&conf->bitmap_list); 3542 while (!list_empty(&head)) { 3543 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3544 list_del_init(&sh->lru); 3545 atomic_inc(&sh->count); 3546 __release_stripe(conf, sh); 3547 } 3548} 3549 3550static void unplug_slaves(mddev_t *mddev) 3551{ 3552 raid5_conf_t *conf = mddev->private; 3553 int i; 3554 int devs = max(conf->raid_disks, conf->previous_raid_disks); 3555 3556 rcu_read_lock(); 3557 for (i = 0; i < devs; i++) { 3558 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3559 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3560 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3561 3562 atomic_inc(&rdev->nr_pending); 3563 rcu_read_unlock(); 3564 3565 blk_unplug(r_queue); 3566 3567 rdev_dec_pending(rdev, mddev); 3568 rcu_read_lock(); 3569 } 3570 } 3571 rcu_read_unlock(); 3572} 3573 3574static void raid5_unplug_device(struct request_queue *q) 3575{ 3576 mddev_t *mddev = q->queuedata; 3577 raid5_conf_t *conf = mddev->private; 3578 unsigned long flags; 3579 3580 spin_lock_irqsave(&conf->device_lock, flags); 3581 3582 if (blk_remove_plug(q)) { 3583 conf->seq_flush++; 3584 raid5_activate_delayed(conf); 3585 } 3586 md_wakeup_thread(mddev->thread); 3587 3588 spin_unlock_irqrestore(&conf->device_lock, flags); 3589 3590 unplug_slaves(mddev); 3591} 3592 3593static int raid5_congested(void *data, int bits) 3594{ 3595 mddev_t *mddev = data; 3596 raid5_conf_t *conf = mddev->private; 3597 3598 /* No difference between reads and writes. Just check 3599 * how busy the stripe_cache is 3600 */ 3601 3602 if (mddev_congested(mddev, bits)) 3603 return 1; 3604 if (conf->inactive_blocked) 3605 return 1; 3606 if (conf->quiesce) 3607 return 1; 3608 if (list_empty_careful(&conf->inactive_list)) 3609 return 1; 3610 3611 return 0; 3612} 3613 3614/* We want read requests to align with chunks where possible, 3615 * but write requests don't need to. 
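 * Worked example (numbers illustrative): with chunk_sectors == 128, a
 * read that would already extend 120 sectors into a chunk has 8 sectors
 * of room left, so at most 8 << 9 == 4096 bytes of the candidate bvec
 * are accepted.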
3616 */
3617 static int raid5_mergeable_bvec(struct request_queue *q,
3618 struct bvec_merge_data *bvm,
3619 struct bio_vec *biovec)
3620 {
3621 mddev_t *mddev = q->queuedata;
3622 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3623 int max;
3624 unsigned int chunk_sectors = mddev->chunk_sectors;
3625 unsigned int bio_sectors = bvm->bi_size >> 9;
3626
3627 if ((bvm->bi_rw & 1) == WRITE)
3628 return biovec->bv_len; /* always allow writes to be mergeable */
3629
3630 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3631 chunk_sectors = mddev->new_chunk_sectors;
3632 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3633 if (max < 0) max = 0;
3634 if (max <= biovec->bv_len && bio_sectors == 0)
3635 return biovec->bv_len;
3636 else
3637 return max;
3638 }
3639
3640
3641 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3642 {
3643 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3644 unsigned int chunk_sectors = mddev->chunk_sectors;
3645 unsigned int bio_sectors = bio->bi_size >> 9;
3646
3647 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3648 chunk_sectors = mddev->new_chunk_sectors;
3649 return chunk_sectors >=
3650 ((sector & (chunk_sectors - 1)) + bio_sectors);
3651 }
3652
3653 /*
3654 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3655 * later sampled by raid5d.
3656 */
3657 static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
3658 {
3659 unsigned long flags;
3660
3661 spin_lock_irqsave(&conf->device_lock, flags);
3662
3663 bi->bi_next = conf->retry_read_aligned_list;
3664 conf->retry_read_aligned_list = bi;
3665
3666 spin_unlock_irqrestore(&conf->device_lock, flags);
3667 md_wakeup_thread(conf->mddev->thread);
3668 }
3669
3670
3671 static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3672 {
3673 struct bio *bi;
3674
3675 bi = conf->retry_read_aligned;
3676 if (bi) {
3677 conf->retry_read_aligned = NULL;
3678 return bi;
3679 }
3680 bi = conf->retry_read_aligned_list;
3681 if (bi) {
3682 conf->retry_read_aligned_list = bi->bi_next;
3683 bi->bi_next = NULL;
3684 /*
3685 * this sets the active stripe count to 1 and the processed
3686 * stripe count to zero (upper 16 bits)
3687 */
3688 bi->bi_phys_segments = 1; /* biased count of active stripes */
3689 }
3690
3691 return bi;
3692 }
3693
3694
3695 /*
3696 * The "raid5_align_endio" should check if the read succeeded and if it
3697 * did, call bio_endio on the original bio (having bio_put the new bio
3698 * first).
3699 * If the read failed, the original bio is handed to add_bio_to_retry() for a retry.
3700 */
3701 static void raid5_align_endio(struct bio *bi, int error)
3702 {
3703 struct bio* raid_bi = bi->bi_private;
3704 mddev_t *mddev;
3705 raid5_conf_t *conf;
3706 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3707 mdk_rdev_t *rdev;
3708
3709 bio_put(bi);
3710
3711 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3712 conf = mddev->private;
3713 rdev = (void*)raid_bi->bi_next;
3714 raid_bi->bi_next = NULL;
3715
3716 rdev_dec_pending(rdev, conf->mddev);
3717
3718 if (!error && uptodate) {
3719 bio_endio(raid_bi, 0);
3720 if (atomic_dec_and_test(&conf->active_aligned_reads))
3721 wake_up(&conf->wait_for_stripe);
3722 return;
3723 }
3724
3725
3726 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3727
3728 add_bio_to_retry(raid_bi, conf);
3729 }
3730
3731 static int bio_fits_rdev(struct bio *bi)
3732 {
3733 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3734
3735 if ((bi->bi_size>>9) > queue_max_sectors(q))
3736 return 0;
3737 blk_recount_segments(q, bi);
3738 if (bi->bi_phys_segments > queue_max_segments(q))
3739 return 0;
3740
3741 if (q->merge_bvec_fn)
3742 /* it's too hard to apply the merge_bvec_fn at this stage,
3743 * just give up
3744 */
3745 return 0;
3746
3747 return 1;
3748 }
3749
3750
3751 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3752 {
3753 mddev_t *mddev = q->queuedata;
3754 raid5_conf_t *conf = mddev->private;
3755 int dd_idx;
3756 struct bio* align_bi;
3757 mdk_rdev_t *rdev;
3758
3759 if (!in_chunk_boundary(mddev, raid_bio)) {
3760 pr_debug("chunk_aligned_read : non aligned\n");
3761 return 0;
3762 }
3763 /*
3764 * use bio_clone to make a copy of the bio
3765 */
3766 align_bi = bio_clone(raid_bio, GFP_NOIO);
3767 if (!align_bi)
3768 return 0;
3769 /*
3770 * set bi_end_io to a new function, and set bi_private to the
3771 * original bio.
3772 */
3773 align_bi->bi_end_io = raid5_align_endio;
3774 align_bi->bi_private = raid_bio;
3775 /*
3776 * compute position
3777 */
3778 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3779 0,
3780 &dd_idx, NULL);
3781
3782 rcu_read_lock();
3783 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3784 if (rdev && test_bit(In_sync, &rdev->flags)) {
3785 atomic_inc(&rdev->nr_pending);
3786 rcu_read_unlock();
3787 raid_bio->bi_next = (void*)rdev;
3788 align_bi->bi_bdev = rdev->bdev;
3789 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3790 align_bi->bi_sector += rdev->data_offset;
3791
3792 if (!bio_fits_rdev(align_bi)) {
3793 /* too big in some way */
3794 bio_put(align_bi);
3795 rdev_dec_pending(rdev, mddev);
3796 return 0;
3797 }
3798
3799 spin_lock_irq(&conf->device_lock);
3800 wait_event_lock_irq(conf->wait_for_stripe,
3801 conf->quiesce == 0,
3802 conf->device_lock, /* nothing */);
3803 atomic_inc(&conf->active_aligned_reads);
3804 spin_unlock_irq(&conf->device_lock);
3805
3806 generic_make_request(align_bi);
3807 return 1;
3808 } else {
3809 rcu_read_unlock();
3810 bio_put(align_bi);
3811 return 0;
3812 }
3813 }
3814
3815 /* __get_priority_stripe - get the next stripe to process
3816 *
3817 * Full stripe writes are allowed to pass preread active stripes up until
3818 * the bypass_threshold is exceeded. In general the bypass_count
3819 * increments when the handle_list is handled before the hold_list; however, it
3820 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3821 * stripe with in flight i/o. The bypass_count will be reset when the
3822 * head of the hold_list has changed, i.e.
the head was promoted to the 3823 * handle_list. 3824 */ 3825static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3826{ 3827 struct stripe_head *sh; 3828 3829 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3830 __func__, 3831 list_empty(&conf->handle_list) ? "empty" : "busy", 3832 list_empty(&conf->hold_list) ? "empty" : "busy", 3833 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3834 3835 if (!list_empty(&conf->handle_list)) { 3836 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3837 3838 if (list_empty(&conf->hold_list)) 3839 conf->bypass_count = 0; 3840 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3841 if (conf->hold_list.next == conf->last_hold) 3842 conf->bypass_count++; 3843 else { 3844 conf->last_hold = conf->hold_list.next; 3845 conf->bypass_count -= conf->bypass_threshold; 3846 if (conf->bypass_count < 0) 3847 conf->bypass_count = 0; 3848 } 3849 } 3850 } else if (!list_empty(&conf->hold_list) && 3851 ((conf->bypass_threshold && 3852 conf->bypass_count > conf->bypass_threshold) || 3853 atomic_read(&conf->pending_full_writes) == 0)) { 3854 sh = list_entry(conf->hold_list.next, 3855 typeof(*sh), lru); 3856 conf->bypass_count -= conf->bypass_threshold; 3857 if (conf->bypass_count < 0) 3858 conf->bypass_count = 0; 3859 } else 3860 return NULL; 3861 3862 list_del_init(&sh->lru); 3863 atomic_inc(&sh->count); 3864 BUG_ON(atomic_read(&sh->count) != 1); 3865 return sh; 3866} 3867 3868static int make_request(struct request_queue *q, struct bio *bi) 3869{ 3870 mddev_t *mddev = q->queuedata; 3871 raid5_conf_t *conf = mddev->private; 3872 int dd_idx; 3873 sector_t new_sector; 3874 sector_t logical_sector, last_sector; 3875 struct stripe_head *sh; 3876 const int rw = bio_data_dir(bi); 3877 int cpu, remaining; 3878 3879 if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { 3880 /* Drain all pending writes. We only really need 3881 * to ensure they have been submitted, but this is 3882 * easier. 3883 */ 3884 mddev->pers->quiesce(mddev, 1); 3885 mddev->pers->quiesce(mddev, 0); 3886 md_barrier_request(mddev, bi); 3887 return 0; 3888 } 3889 3890 md_write_start(mddev, bi); 3891 3892 cpu = part_stat_lock(); 3893 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 3894 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 3895 bio_sectors(bi)); 3896 part_stat_unlock(); 3897 3898 if (rw == READ && 3899 mddev->reshape_position == MaxSector && 3900 chunk_aligned_read(q, bi)) 3901 return 0; 3902 3903 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3904 last_sector = bi->bi_sector + (bi->bi_size>>9); 3905 bi->bi_next = NULL; 3906 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3907 3908 for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3909 DEFINE_WAIT(w); 3910 int disks, data_disks; 3911 int previous; 3912 3913 retry: 3914 previous = 0; 3915 disks = conf->raid_disks; 3916 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3917 if (unlikely(conf->reshape_progress != MaxSector)) { 3918 /* spinlock is needed as reshape_progress may be 3919 * 64bit on a 32bit platform, and so it might be 3920 * possible to see a half-updated value. 3921 * Of course reshape_progress could change after 3922 * the lock is dropped, so once we get a reference 3923 * to the stripe that we think it is, we will have 3924 * to check again. 3925 */ 3926 spin_lock_irq(&conf->device_lock); 3927 if (mddev->delta_disks < 0 3928 ?
logical_sector < conf->reshape_progress 3929 : logical_sector >= conf->reshape_progress) { 3930 disks = conf->previous_raid_disks; 3931 previous = 1; 3932 } else { 3933 if (mddev->delta_disks < 0 3934 ? logical_sector < conf->reshape_safe 3935 : logical_sector >= conf->reshape_safe) { 3936 spin_unlock_irq(&conf->device_lock); 3937 schedule(); 3938 goto retry; 3939 } 3940 } 3941 spin_unlock_irq(&conf->device_lock); 3942 } 3943 data_disks = disks - conf->max_degraded; 3944 3945 new_sector = raid5_compute_sector(conf, logical_sector, 3946 previous, 3947 &dd_idx, NULL); 3948 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3949 (unsigned long long)new_sector, 3950 (unsigned long long)logical_sector); 3951 3952 sh = get_active_stripe(conf, new_sector, previous, 3953 (bi->bi_rw&RWA_MASK), 0); 3954 if (sh) { 3955 if (unlikely(previous)) { 3956 /* expansion might have moved on while waiting for a 3957 * stripe, so we must do the range check again. 3958 * Expansion could still move past after this 3959 * test, but as we are holding a reference to 3960 * 'sh', we know that if that happens, 3961 * STRIPE_EXPANDING will get set and the expansion 3962 * won't proceed until we finish with the stripe. 3963 */ 3964 int must_retry = 0; 3965 spin_lock_irq(&conf->device_lock); 3966 if (mddev->delta_disks < 0 3967 ? logical_sector >= conf->reshape_progress 3968 : logical_sector < conf->reshape_progress) 3969 /* mismatch, need to try again */ 3970 must_retry = 1; 3971 spin_unlock_irq(&conf->device_lock); 3972 if (must_retry) { 3973 release_stripe(sh); 3974 schedule(); 3975 goto retry; 3976 } 3977 } 3978 3979 if (bio_data_dir(bi) == WRITE && 3980 logical_sector >= mddev->suspend_lo && 3981 logical_sector < mddev->suspend_hi) { 3982 release_stripe(sh); 3983 /* As the suspend_* range is controlled by 3984 * userspace, we want an interruptible 3985 * wait. 3986 */ 3987 flush_signals(current); 3988 prepare_to_wait(&conf->wait_for_overlap, 3989 &w, TASK_INTERRUPTIBLE); 3990 if (logical_sector >= mddev->suspend_lo && 3991 logical_sector < mddev->suspend_hi) 3992 schedule(); 3993 goto retry; 3994 } 3995 3996 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3997 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3998 /* Stripe is busy expanding or 3999 * add failed due to overlap. Flush everything 4000 * and wait a while 4001 */ 4002 raid5_unplug_device(mddev->queue); 4003 release_stripe(sh); 4004 schedule(); 4005 goto retry; 4006 } 4007 finish_wait(&conf->wait_for_overlap, &w); 4008 set_bit(STRIPE_HANDLE, &sh->state); 4009 clear_bit(STRIPE_DELAYED, &sh->state); 4010 if (mddev->barrier && 4011 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4012 atomic_inc(&conf->preread_active_stripes); 4013 release_stripe(sh); 4014 } else { 4015 /* cannot get stripe for read-ahead, just give up */ 4016 clear_bit(BIO_UPTODATE, &bi->bi_flags); 4017 finish_wait(&conf->wait_for_overlap, &w); 4018 break; 4019 } 4020 4021 } 4022 spin_lock_irq(&conf->device_lock); 4023 remaining = raid5_dec_bi_phys_segments(bi); 4024 spin_unlock_irq(&conf->device_lock); 4025 if (remaining == 0) { 4026 4027 if (rw == WRITE) 4028 md_write_end(mddev); 4029 4030 bio_endio(bi, 0); 4031 } 4032 4033 if (mddev->barrier) { 4034 /* We need to wait for the stripes to all be handled. 4035 * So: wait for preread_active_stripes to drop to 0.
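 *
 * For a sense of scale (illustrative figures, assuming 4KiB pages):
 * each stripe_head covers STRIPE_SECTORS = 8 sectors per device, so
 * a single 1MiB barrier write was split across 2048/8 = 256
 * stripe_heads by the loop above, and this wait only returns once
 * every preread-active stripe, those included, has drained.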
4036 */ 4037 wait_event(mddev->thread->wqueue, 4038 atomic_read(&conf->preread_active_stripes) == 0); 4039 } 4040 return 0; 4041} 4042 4043static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 4044 4045static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 4046{ 4047 /* reshaping is quite different to recovery/resync so it is 4048 * handled quite separately ... here. 4049 * 4050 * On each call to sync_request, we gather one chunk worth of 4051 * destination stripes and flag them as expanding. 4052 * Then we find all the source stripes and request reads. 4053 * As the reads complete, handle_stripe will copy the data 4054 * into the destination stripe and release that stripe. 4055 */ 4056 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4057 struct stripe_head *sh; 4058 sector_t first_sector, last_sector; 4059 int raid_disks = conf->previous_raid_disks; 4060 int data_disks = raid_disks - conf->max_degraded; 4061 int new_data_disks = conf->raid_disks - conf->max_degraded; 4062 int i; 4063 int dd_idx; 4064 sector_t writepos, readpos, safepos; 4065 sector_t stripe_addr; 4066 int reshape_sectors; 4067 struct list_head stripes; 4068 4069 if (sector_nr == 0) { 4070 /* If restarting in the middle, skip the initial sectors */ 4071 if (mddev->delta_disks < 0 && 4072 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 4073 sector_nr = raid5_size(mddev, 0, 0) 4074 - conf->reshape_progress; 4075 } else if (mddev->delta_disks >= 0 && 4076 conf->reshape_progress > 0) 4077 sector_nr = conf->reshape_progress; 4078 sector_div(sector_nr, new_data_disks); 4079 if (sector_nr) { 4080 mddev->curr_resync_completed = sector_nr; 4081 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4082 *skipped = 1; 4083 return sector_nr; 4084 } 4085 } 4086 4087 /* We need to process a full chunk at a time. 4088 * If old and new chunk sizes differ, we need to process the 4089 * larger of the two. 4090 */ 4091 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 4092 reshape_sectors = mddev->new_chunk_sectors; 4093 else 4094 reshape_sectors = mddev->chunk_sectors; 4095 4096 /* we update the metadata when there is more than 3Meg 4097 * in the block range (that is rather arbitrary, should 4098 * probably be time based) or when the data about to be 4099 * copied would over-write the source of the data at 4100 * the front of the range. 4101 * i.e. one new_stripe along from reshape_progress, the new layout 4102 * maps to after where reshape_safe maps in the old layout. 4103 */ 4104 writepos = conf->reshape_progress; 4105 sector_div(writepos, new_data_disks); 4106 readpos = conf->reshape_progress; 4107 sector_div(readpos, data_disks); 4108 safepos = conf->reshape_safe; 4109 sector_div(safepos, data_disks); 4110 if (mddev->delta_disks < 0) { 4111 writepos -= min_t(sector_t, reshape_sectors, writepos); 4112 readpos += reshape_sectors; 4113 safepos += reshape_sectors; 4114 } else { 4115 writepos += reshape_sectors; 4116 readpos -= min_t(sector_t, reshape_sectors, readpos); 4117 safepos -= min_t(sector_t, reshape_sectors, safepos); 4118 } 4119 4120 /* 'writepos' is the most advanced device address we might write. 4121 * 'readpos' is the least advanced device address we might read. 4122 * 'safepos' is the least address recorded in the metadata as having 4123 * been reshaped. 4124 * If 'readpos' is behind 'writepos', then there is no way that we can 4125 * ensure safety in the face of a crash - that must be done by userspace 4126 * making a backup of the data.
So in that case there is no particular 4127 * rush to update metadata. 4128 * Otherwise if 'safepos' is behind 'writepos', then we really need to 4129 * update the metadata to advance 'safepos' to match 'readpos' so that 4130 * we can be safe in the event of a crash. 4131 * So we insist on updating metadata if safepos is behind writepos and 4132 * readpos is beyond writepos. 4133 * In any case, update the metadata every 10 seconds. 4134 * Maybe that number should be configurable, but I'm not sure it is 4135 * worth it.... maybe it could be a multiple of safemode_delay??? 4136 */ 4137 if ((mddev->delta_disks < 0 4138 ? (safepos > writepos && readpos < writepos) 4139 : (safepos < writepos && readpos > writepos)) || 4140 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4141 /* Cannot proceed until we've updated the superblock... */ 4142 wait_event(conf->wait_for_overlap, 4143 atomic_read(&conf->reshape_stripes)==0); 4144 mddev->reshape_position = conf->reshape_progress; 4145 mddev->curr_resync_completed = mddev->curr_resync; 4146 conf->reshape_checkpoint = jiffies; 4147 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4148 md_wakeup_thread(mddev->thread); 4149 wait_event(mddev->sb_wait, mddev->flags == 0 || 4150 kthread_should_stop()); 4151 spin_lock_irq(&conf->device_lock); 4152 conf->reshape_safe = mddev->reshape_position; 4153 spin_unlock_irq(&conf->device_lock); 4154 wake_up(&conf->wait_for_overlap); 4155 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4156 } 4157 4158 if (mddev->delta_disks < 0) { 4159 BUG_ON(conf->reshape_progress == 0); 4160 stripe_addr = writepos; 4161 BUG_ON((mddev->dev_sectors & 4162 ~((sector_t)reshape_sectors - 1)) 4163 - reshape_sectors - stripe_addr 4164 != sector_nr); 4165 } else { 4166 BUG_ON(writepos != sector_nr + reshape_sectors); 4167 stripe_addr = sector_nr; 4168 } 4169 INIT_LIST_HEAD(&stripes); 4170 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 4171 int j; 4172 int skipped_disk = 0; 4173 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 4174 set_bit(STRIPE_EXPANDING, &sh->state); 4175 atomic_inc(&conf->reshape_stripes); 4176 /* If any of this stripe is beyond the end of the old 4177 * array, then we need to zero those blocks 4178 */ 4179 for (j=sh->disks; j--;) { 4180 sector_t s; 4181 if (j == sh->pd_idx) 4182 continue; 4183 if (conf->level == 6 && 4184 j == sh->qd_idx) 4185 continue; 4186 s = compute_blocknr(sh, j, 0); 4187 if (s < raid5_size(mddev, 0, 0)) { 4188 skipped_disk = 1; 4189 continue; 4190 } 4191 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 4192 set_bit(R5_Expanded, &sh->dev[j].flags); 4193 set_bit(R5_UPTODATE, &sh->dev[j].flags); 4194 } 4195 if (!skipped_disk) { 4196 set_bit(STRIPE_EXPAND_READY, &sh->state); 4197 set_bit(STRIPE_HANDLE, &sh->state); 4198 } 4199 list_add(&sh->lru, &stripes); 4200 } 4201 spin_lock_irq(&conf->device_lock); 4202 if (mddev->delta_disks < 0) 4203 conf->reshape_progress -= reshape_sectors * new_data_disks; 4204 else 4205 conf->reshape_progress += reshape_sectors * new_data_disks; 4206 spin_unlock_irq(&conf->device_lock); 4207 /* Ok, those stripes are ready. We can start scheduling 4208 * reads on the source stripes. 4209 * The source stripes are determined by mapping the first and last 4210 * block on the destination stripes.
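 *
 * For example (illustrative numbers only): growing from 3 to 4
 * devices (2 to 3 data disks) with 64-sector chunks, the destination
 * chunk at stripe_addr 0 holds array sectors 0..191; in the old
 * 2-data-disk layout array sector 191 maps to device sector 127, so
 * the loop below walks the source stripes covering device sectors
 * 0..127.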
4211 */ 4212 first_sector = 4213 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 4214 1, &dd_idx, NULL); 4215 last_sector = 4216 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 4217 * new_data_disks - 1), 4218 1, &dd_idx, NULL); 4219 if (last_sector >= mddev->dev_sectors) 4220 last_sector = mddev->dev_sectors - 1; 4221 while (first_sector <= last_sector) { 4222 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 4223 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4224 set_bit(STRIPE_HANDLE, &sh->state); 4225 release_stripe(sh); 4226 first_sector += STRIPE_SECTORS; 4227 } 4228 /* Now that the sources are clearly marked, we can release 4229 * the destination stripes 4230 */ 4231 while (!list_empty(&stripes)) { 4232 sh = list_entry(stripes.next, struct stripe_head, lru); 4233 list_del_init(&sh->lru); 4234 release_stripe(sh); 4235 } 4236 /* If this takes us to the resync_max point where we have to pause, 4237 * then we need to write out the superblock. 4238 */ 4239 sector_nr += reshape_sectors; 4240 if ((sector_nr - mddev->curr_resync_completed) * 2 4241 >= mddev->resync_max - mddev->curr_resync_completed) { 4242 /* Cannot proceed until we've updated the superblock... */ 4243 wait_event(conf->wait_for_overlap, 4244 atomic_read(&conf->reshape_stripes) == 0); 4245 mddev->reshape_position = conf->reshape_progress; 4246 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors; 4247 conf->reshape_checkpoint = jiffies; 4248 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4249 md_wakeup_thread(mddev->thread); 4250 wait_event(mddev->sb_wait, 4251 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4252 || kthread_should_stop()); 4253 spin_lock_irq(&conf->device_lock); 4254 conf->reshape_safe = mddev->reshape_position; 4255 spin_unlock_irq(&conf->device_lock); 4256 wake_up(&conf->wait_for_overlap); 4257 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4258 } 4259 return reshape_sectors; 4260} 4261 4262/* FIXME go_faster isn't used */ 4263static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 4264{ 4265 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4266 struct stripe_head *sh; 4267 sector_t max_sector = mddev->dev_sectors; 4268 int sync_blocks; 4269 int still_degraded = 0; 4270 int i; 4271 4272 if (sector_nr >= max_sector) { 4273 /* just being told to finish up .. nothing much to do */ 4274 unplug_slaves(mddev); 4275 4276 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4277 end_reshape(conf); 4278 return 0; 4279 } 4280 4281 if (mddev->curr_resync < max_sector) /* aborted */ 4282 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 4283 &sync_blocks, 1); 4284 else /* completed sync */ 4285 conf->fullsync = 0; 4286 bitmap_close_sync(mddev->bitmap); 4287 4288 return 0; 4289 } 4290 4291 /* Allow raid5_quiesce to complete */ 4292 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 4293 4294 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4295 return reshape_request(mddev, sector_nr, skipped); 4296 4297 /* No need to check resync_max as we never do more than one 4298 * stripe, and as resync_max will always be on a chunk boundary, 4299 * if the check in md_do_sync didn't fire, there is no chance 4300 * of overstepping resync_max here 4301 */ 4302 4303 /* if there are too many failed drives and we are trying 4304 * to resync, then assert that we are finished, because there is 4305 * nothing we can do.
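 *
 * e.g. a raid5 set (max_degraded == 1) that has already lost a device
 * has no redundancy left to check or repair, so rather than loop
 * forever we report the remaining range as done.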
4306 */ 4307 if (mddev->degraded >= conf->max_degraded && 4308 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4309 sector_t rv = mddev->dev_sectors - sector_nr; 4310 *skipped = 1; 4311 return rv; 4312 } 4313 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4314 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4315 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4316 /* we can skip this block, and probably more */ 4317 sync_blocks /= STRIPE_SECTORS; 4318 *skipped = 1; 4319 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4320 } 4321 4322 4323 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4324 4325 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 4326 if (sh == NULL) { 4327 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 4328 /* make sure we don't swamp the stripe cache if someone else 4329 * is trying to get access 4330 */ 4331 schedule_timeout_uninterruptible(1); 4332 } 4333 /* Need to check if array will still be degraded after recovery/resync 4334 * We don't need to check the 'failed' flag as when that gets set, 4335 * recovery aborts. 4336 */ 4337 for (i = 0; i < conf->raid_disks; i++) 4338 if (conf->disks[i].rdev == NULL) 4339 still_degraded = 1; 4340 4341 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4342 4343 spin_lock(&sh->lock); 4344 set_bit(STRIPE_SYNCING, &sh->state); 4345 clear_bit(STRIPE_INSYNC, &sh->state); 4346 spin_unlock(&sh->lock); 4347 4348 handle_stripe(sh); 4349 release_stripe(sh); 4350 4351 return STRIPE_SECTORS; 4352} 4353 4354static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 4355{ 4356 /* We may not be able to submit a whole bio at once as there 4357 * may not be enough stripe_heads available. 4358 * We cannot pre-allocate enough stripe_heads as we may need 4359 * more than exist in the cache (if we allow ever-larger chunks). 4360 * So we do one stripe head at a time and record in 4361 * ->bi_hw_segments how many have been done. 4362 * 4363 * We *know* that this entire raid_bio is in one chunk, so 4364 * it will map to only one 'dd_idx' and need only one call to raid5_compute_sector.
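 *
 * For scale (illustrative, with 4KiB pages): a 32KiB aligned read is
 * 64 sectors, i.e. 8 stripe_heads handled one at a time below; if we
 * run out of stripe_heads after, say, 5 of them, ->bi_hw_segments
 * records 5 and the retry resumes at the 6th rather than redoing the
 * first five.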
4365 */ 4366 struct stripe_head *sh; 4367 int dd_idx; 4368 sector_t sector, logical_sector, last_sector; 4369 int scnt = 0; 4370 int remaining; 4371 int handled = 0; 4372 4373 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4374 sector = raid5_compute_sector(conf, logical_sector, 4375 0, &dd_idx, NULL); 4376 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4377 4378 for (; logical_sector < last_sector; 4379 logical_sector += STRIPE_SECTORS, 4380 sector += STRIPE_SECTORS, 4381 scnt++) { 4382 4383 if (scnt < raid5_bi_hw_segments(raid_bio)) 4384 /* already done this stripe */ 4385 continue; 4386 4387 sh = get_active_stripe(conf, sector, 0, 1, 0); 4388 4389 if (!sh) { 4390 /* failed to get a stripe - must wait */ 4391 raid5_set_bi_hw_segments(raid_bio, scnt); 4392 conf->retry_read_aligned = raid_bio; 4393 return handled; 4394 } 4395 4396 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4397 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4398 release_stripe(sh); 4399 raid5_set_bi_hw_segments(raid_bio, scnt); 4400 conf->retry_read_aligned = raid_bio; 4401 return handled; 4402 } 4403 4404 handle_stripe(sh); 4405 release_stripe(sh); 4406 handled++; 4407 } 4408 spin_lock_irq(&conf->device_lock); 4409 remaining = raid5_dec_bi_phys_segments(raid_bio); 4410 spin_unlock_irq(&conf->device_lock); 4411 if (remaining == 0) 4412 bio_endio(raid_bio, 0); 4413 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4414 wake_up(&conf->wait_for_stripe); 4415 return handled; 4416} 4417 4418 4419/* 4420 * This is our raid5 kernel thread. 4421 * 4422 * We scan the hash table for stripes which can be handled now. 4423 * During the scan, completed stripes are saved for us by the interrupt 4424 * handler, so that they will not have to wait for our next wakeup. 
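 *
 * Note that the main loop below holds conf->device_lock only while
 * choosing work: it drops the lock around bitmap_unplug(),
 * retry_aligned_read() and handle_stripe() and re-takes it before
 * picking the next stripe, so the lock is never held across the slow
 * parts.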
4425 */ 4426static void raid5d(mddev_t *mddev) 4427{ 4428 struct stripe_head *sh; 4429 raid5_conf_t *conf = mddev->private; 4430 int handled; 4431 4432 pr_debug("+++ raid5d active\n"); 4433 4434 md_check_recovery(mddev); 4435 4436 handled = 0; 4437 spin_lock_irq(&conf->device_lock); 4438 while (1) { 4439 struct bio *bio; 4440 4441 if (conf->seq_flush != conf->seq_write) { 4442 int seq = conf->seq_flush; 4443 spin_unlock_irq(&conf->device_lock); 4444 bitmap_unplug(mddev->bitmap); 4445 spin_lock_irq(&conf->device_lock); 4446 conf->seq_write = seq; 4447 activate_bit_delay(conf); 4448 } 4449 4450 while ((bio = remove_bio_from_retry(conf))) { 4451 int ok; 4452 spin_unlock_irq(&conf->device_lock); 4453 ok = retry_aligned_read(conf, bio); 4454 spin_lock_irq(&conf->device_lock); 4455 if (!ok) 4456 break; 4457 handled++; 4458 } 4459 4460 sh = __get_priority_stripe(conf); 4461 4462 if (!sh) 4463 break; 4464 spin_unlock_irq(&conf->device_lock); 4465 4466 handled++; 4467 handle_stripe(sh); 4468 release_stripe(sh); 4469 cond_resched(); 4470 4471 spin_lock_irq(&conf->device_lock); 4472 } 4473 pr_debug("%d stripes handled\n", handled); 4474 4475 spin_unlock_irq(&conf->device_lock); 4476 4477 async_tx_issue_pending_all(); 4478 unplug_slaves(mddev); 4479 4480 pr_debug("--- raid5d inactive\n"); 4481} 4482 4483static ssize_t 4484raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4485{ 4486 raid5_conf_t *conf = mddev->private; 4487 if (conf) 4488 return sprintf(page, "%d\n", conf->max_nr_stripes); 4489 else 4490 return 0; 4491} 4492 4493static ssize_t 4494raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4495{ 4496 raid5_conf_t *conf = mddev->private; 4497 unsigned long new; 4498 int err; 4499 4500 if (len >= PAGE_SIZE) 4501 return -EINVAL; 4502 if (!conf) 4503 return -ENODEV; 4504 4505 if (strict_strtoul(page, 10, &new)) 4506 return -EINVAL; 4507 if (new <= 16 || new > 32768) 4508 return -EINVAL; 4509 while (new < conf->max_nr_stripes) { 4510 if (drop_one_stripe(conf)) 4511 conf->max_nr_stripes--; 4512 else 4513 break; 4514 } 4515 err = md_allow_write(mddev); 4516 if (err) 4517 return err; 4518 while (new > conf->max_nr_stripes) { 4519 if (grow_one_stripe(conf)) 4520 conf->max_nr_stripes++; 4521 else break; 4522 } 4523 return len; 4524} 4525 4526static struct md_sysfs_entry 4527raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4528 raid5_show_stripe_cache_size, 4529 raid5_store_stripe_cache_size); 4530 4531static ssize_t 4532raid5_show_preread_threshold(mddev_t *mddev, char *page) 4533{ 4534 raid5_conf_t *conf = mddev->private; 4535 if (conf) 4536 return sprintf(page, "%d\n", conf->bypass_threshold); 4537 else 4538 return 0; 4539} 4540 4541static ssize_t 4542raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4543{ 4544 raid5_conf_t *conf = mddev->private; 4545 unsigned long new; 4546 if (len >= PAGE_SIZE) 4547 return -EINVAL; 4548 if (!conf) 4549 return -ENODEV; 4550 4551 if (strict_strtoul(page, 10, &new)) 4552 return -EINVAL; 4553 if (new > conf->max_nr_stripes) 4554 return -EINVAL; 4555 conf->bypass_threshold = new; 4556 return len; 4557} 4558 4559static struct md_sysfs_entry 4560raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4561 S_IRUGO | S_IWUSR, 4562 raid5_show_preread_threshold, 4563 raid5_store_preread_threshold); 4564 4565static ssize_t 4566stripe_cache_active_show(mddev_t *mddev, char *page) 4567{ 4568 raid5_conf_t *conf = mddev->private; 4569 if (conf) 4570 return sprintf(page, "%d\n", 
atomic_read(&conf->active_stripes)); 4571 else 4572 return 0; 4573} 4574 4575static struct md_sysfs_entry 4576raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4577 4578static struct attribute *raid5_attrs[] = { 4579 &raid5_stripecache_size.attr, 4580 &raid5_stripecache_active.attr, 4581 &raid5_preread_bypass_threshold.attr, 4582 NULL, 4583}; 4584static struct attribute_group raid5_attrs_group = { 4585 .name = NULL, 4586 .attrs = raid5_attrs, 4587}; 4588 4589static sector_t 4590raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4591{ 4592 raid5_conf_t *conf = mddev->private; 4593 4594 if (!sectors) 4595 sectors = mddev->dev_sectors; 4596 if (!raid_disks) 4597 /* size is defined by the smallest of previous and new size */ 4598 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 4599 4600 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4601 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4602 return sectors * (raid_disks - conf->max_degraded); 4603} 4604 4605static void raid5_free_percpu(raid5_conf_t *conf) 4606{ 4607 struct raid5_percpu *percpu; 4608 unsigned long cpu; 4609 4610 if (!conf->percpu) 4611 return; 4612 4613 get_online_cpus(); 4614 for_each_possible_cpu(cpu) { 4615 percpu = per_cpu_ptr(conf->percpu, cpu); 4616 safe_put_page(percpu->spare_page); 4617 kfree(percpu->scribble); 4618 } 4619#ifdef CONFIG_HOTPLUG_CPU 4620 unregister_cpu_notifier(&conf->cpu_notify); 4621#endif 4622 put_online_cpus(); 4623 4624 free_percpu(conf->percpu); 4625} 4626 4627static void free_conf(raid5_conf_t *conf) 4628{ 4629 shrink_stripes(conf); 4630 raid5_free_percpu(conf); 4631 kfree(conf->disks); 4632 kfree(conf->stripe_hashtbl); 4633 kfree(conf); 4634} 4635 4636#ifdef CONFIG_HOTPLUG_CPU 4637static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 4638 void *hcpu) 4639{ 4640 raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify); 4641 long cpu = (long)hcpu; 4642 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 4643 4644 switch (action) { 4645 case CPU_UP_PREPARE: 4646 case CPU_UP_PREPARE_FROZEN: 4647 if (conf->level == 6 && !percpu->spare_page) 4648 percpu->spare_page = alloc_page(GFP_KERNEL); 4649 if (!percpu->scribble) 4650 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4651 4652 if (!percpu->scribble || 4653 (conf->level == 6 && !percpu->spare_page)) { 4654 safe_put_page(percpu->spare_page); 4655 kfree(percpu->scribble); 4656 pr_err("%s: failed memory allocation for cpu%ld\n", 4657 __func__, cpu); 4658 return NOTIFY_BAD; 4659 } 4660 break; 4661 case CPU_DEAD: 4662 case CPU_DEAD_FROZEN: 4663 safe_put_page(percpu->spare_page); 4664 kfree(percpu->scribble); 4665 percpu->spare_page = NULL; 4666 percpu->scribble = NULL; 4667 break; 4668 default: 4669 break; 4670 } 4671 return NOTIFY_OK; 4672} 4673#endif 4674 4675static int raid5_alloc_percpu(raid5_conf_t *conf) 4676{ 4677 unsigned long cpu; 4678 struct page *spare_page; 4679 struct raid5_percpu __percpu *allcpus; 4680 void *scribble; 4681 int err; 4682 4683 allcpus = alloc_percpu(struct raid5_percpu); 4684 if (!allcpus) 4685 return -ENOMEM; 4686 conf->percpu = allcpus; 4687 4688 get_online_cpus(); 4689 err = 0; 4690 for_each_present_cpu(cpu) { 4691 if (conf->level == 6) { 4692 spare_page = alloc_page(GFP_KERNEL); 4693 if (!spare_page) { 4694 err = -ENOMEM; 4695 break; 4696 } 4697 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; 4698 } 4699 scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4700 if (!scribble) { 4701 err = -ENOMEM; 4702 break; 4703 } 
4704 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; 4705 } 4706#ifdef CONFIG_HOTPLUG_CPU 4707 conf->cpu_notify.notifier_call = raid456_cpu_notify; 4708 conf->cpu_notify.priority = 0; 4709 if (err == 0) 4710 err = register_cpu_notifier(&conf->cpu_notify); 4711#endif 4712 put_online_cpus(); 4713 4714 return err; 4715} 4716 4717static raid5_conf_t *setup_conf(mddev_t *mddev) 4718{ 4719 raid5_conf_t *conf; 4720 int raid_disk, memory, max_disks; 4721 mdk_rdev_t *rdev; 4722 struct disk_info *disk; 4723 4724 if (mddev->new_level != 5 4725 && mddev->new_level != 4 4726 && mddev->new_level != 6) { 4727 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4728 mdname(mddev), mddev->new_level); 4729 return ERR_PTR(-EIO); 4730 } 4731 if ((mddev->new_level == 5 4732 && !algorithm_valid_raid5(mddev->new_layout)) || 4733 (mddev->new_level == 6 4734 && !algorithm_valid_raid6(mddev->new_layout))) { 4735 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4736 mdname(mddev), mddev->new_layout); 4737 return ERR_PTR(-EIO); 4738 } 4739 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4740 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4741 mdname(mddev), mddev->raid_disks); 4742 return ERR_PTR(-EINVAL); 4743 } 4744 4745 if (!mddev->new_chunk_sectors || 4746 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 4747 !is_power_of_2(mddev->new_chunk_sectors)) { 4748 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4749 mddev->new_chunk_sectors << 9, mdname(mddev)); 4750 return ERR_PTR(-EINVAL); 4751 } 4752 4753 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4754 if (conf == NULL) 4755 goto abort; 4756 spin_lock_init(&conf->device_lock); 4757 init_waitqueue_head(&conf->wait_for_stripe); 4758 init_waitqueue_head(&conf->wait_for_overlap); 4759 INIT_LIST_HEAD(&conf->handle_list); 4760 INIT_LIST_HEAD(&conf->hold_list); 4761 INIT_LIST_HEAD(&conf->delayed_list); 4762 INIT_LIST_HEAD(&conf->bitmap_list); 4763 INIT_LIST_HEAD(&conf->inactive_list); 4764 atomic_set(&conf->active_stripes, 0); 4765 atomic_set(&conf->preread_active_stripes, 0); 4766 atomic_set(&conf->active_aligned_reads, 0); 4767 conf->bypass_threshold = BYPASS_THRESHOLD; 4768 4769 conf->raid_disks = mddev->raid_disks; 4770 if (mddev->reshape_position == MaxSector) 4771 conf->previous_raid_disks = mddev->raid_disks; 4772 else 4773 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4774 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 4775 conf->scribble_len = scribble_len(max_disks); 4776 4777 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 4778 GFP_KERNEL); 4779 if (!conf->disks) 4780 goto abort; 4781 4782 conf->mddev = mddev; 4783 4784 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4785 goto abort; 4786 4787 conf->level = mddev->new_level; 4788 if (raid5_alloc_percpu(conf) != 0) 4789 goto abort; 4790 4791 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4792 4793 list_for_each_entry(rdev, &mddev->disks, same_set) { 4794 raid_disk = rdev->raid_disk; 4795 if (raid_disk >= max_disks 4796 || raid_disk < 0) 4797 continue; 4798 disk = conf->disks + raid_disk; 4799 4800 disk->rdev = rdev; 4801 4802 if (test_bit(In_sync, &rdev->flags)) { 4803 char b[BDEVNAME_SIZE]; 4804 printk(KERN_INFO "raid5: device %s operational as raid" 4805 " disk %d\n", bdevname(rdev->bdev,b), 4806 raid_disk); 4807 } else 4808 /* Cannot rely on bitmap to complete recovery */ 4809 conf->fullsync = 1; 4810 } 4811 4812 conf->chunk_sectors = 
mddev->new_chunk_sectors; 4813 conf->level = mddev->new_level; 4814 if (conf->level == 6) 4815 conf->max_degraded = 2; 4816 else 4817 conf->max_degraded = 1; 4818 conf->algorithm = mddev->new_layout; 4819 conf->max_nr_stripes = NR_STRIPES; 4820 conf->reshape_progress = mddev->reshape_position; 4821 if (conf->reshape_progress != MaxSector) { 4822 conf->prev_chunk_sectors = mddev->chunk_sectors; 4823 conf->prev_algo = mddev->layout; 4824 } 4825 4826 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4827 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4828 if (grow_stripes(conf, conf->max_nr_stripes)) { 4829 printk(KERN_ERR 4830 "raid5: couldn't allocate %dkB for buffers\n", memory); 4831 goto abort; 4832 } else 4833 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4834 memory, mdname(mddev)); 4835 4836 conf->thread = md_register_thread(raid5d, mddev, NULL); 4837 if (!conf->thread) { 4838 printk(KERN_ERR 4839 "raid5: couldn't allocate thread for %s\n", 4840 mdname(mddev)); 4841 goto abort; 4842 } 4843 4844 return conf; 4845 4846 abort: 4847 if (conf) { 4848 free_conf(conf); 4849 return ERR_PTR(-EIO); 4850 } else 4851 return ERR_PTR(-ENOMEM); 4852} 4853 4854 4855static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 4856{ 4857 switch (algo) { 4858 case ALGORITHM_PARITY_0: 4859 if (raid_disk < max_degraded) 4860 return 1; 4861 break; 4862 case ALGORITHM_PARITY_N: 4863 if (raid_disk >= raid_disks - max_degraded) 4864 return 1; 4865 break; 4866 case ALGORITHM_PARITY_0_6: 4867 if (raid_disk == 0 || 4868 raid_disk == raid_disks - 1) 4869 return 1; 4870 break; 4871 case ALGORITHM_LEFT_ASYMMETRIC_6: 4872 case ALGORITHM_RIGHT_ASYMMETRIC_6: 4873 case ALGORITHM_LEFT_SYMMETRIC_6: 4874 case ALGORITHM_RIGHT_SYMMETRIC_6: 4875 if (raid_disk == raid_disks - 1) 4876 return 1; 4877 } 4878 return 0; 4879} 4880 4881static int run(mddev_t *mddev) 4882{ 4883 raid5_conf_t *conf; 4884 int working_disks = 0, chunk_size; 4885 int dirty_parity_disks = 0; 4886 mdk_rdev_t *rdev; 4887 sector_t reshape_offset = 0; 4888 4889 if (mddev->recovery_cp != MaxSector) 4890 printk(KERN_NOTICE "raid5: %s is not clean" 4891 " -- starting background reconstruction\n", 4892 mdname(mddev)); 4893 if (mddev->reshape_position != MaxSector) { 4894 /* Check that we can continue the reshape. 4895 * Currently only the number of disks can change, it must 4896 * increase, and we must be past the point where 4897 * a stripe over-writes itself 4898 */ 4899 sector_t here_new, here_old; 4900 int old_disks; 4901 int max_degraded = (mddev->level == 6 ? 2 : 1); 4902 4903 if (mddev->new_level != mddev->level) { 4904 printk(KERN_ERR "raid5: %s: unsupported reshape " 4905 "required - aborting.\n", 4906 mdname(mddev)); 4907 return -EINVAL; 4908 } 4909 old_disks = mddev->raid_disks - mddev->delta_disks; 4910 /* reshape_position must be on a new-stripe boundary, and one 4911 * further up in new geometry must map after here in old 4912 * geometry.
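 *
 * A worked example (numbers invented for illustration): growing a
 * 4-disk raid5 to 5 disks with 128-sector chunks, a full stripe in
 * the new geometry holds 4 * 128 = 512 sectors, so reshape_position
 * must be a multiple of 512; here_new is then reshape_position / 512
 * while here_old is reshape_position / (3 * 128), and the reshape may
 * only continue if the write target is clear of what is still to be
 * read.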
4913 */ 4914 here_new = mddev->reshape_position; 4915 if (sector_div(here_new, mddev->new_chunk_sectors * 4916 (mddev->raid_disks - max_degraded))) { 4917 printk(KERN_ERR "raid5: reshape_position not " 4918 "on a stripe boundary\n"); 4919 return -EINVAL; 4920 } 4921 reshape_offset = here_new * mddev->new_chunk_sectors; 4922 /* here_new is the stripe we will write to */ 4923 here_old = mddev->reshape_position; 4924 sector_div(here_old, mddev->chunk_sectors * 4925 (old_disks-max_degraded)); 4926 /* here_old is the first stripe that we might need to read 4927 * from */ 4928 if (mddev->delta_disks == 0) { 4929 /* We cannot be sure it is safe to start an in-place 4930 * reshape. It is only safe if user-space is monitoring 4931 * and taking constant backups. 4932 * mdadm always starts a situation like this in 4933 * readonly mode so it can take control before 4934 * allowing any writes. So just check for that. 4935 */ 4936 if ((here_new * mddev->new_chunk_sectors != 4937 here_old * mddev->chunk_sectors) || 4938 mddev->ro == 0) { 4939 printk(KERN_ERR "raid5: in-place reshape must be started" 4940 " in read-only mode - aborting\n"); 4941 return -EINVAL; 4942 } 4943 } else if (mddev->delta_disks < 0 4944 ? (here_new * mddev->new_chunk_sectors <= 4945 here_old * mddev->chunk_sectors) 4946 : (here_new * mddev->new_chunk_sectors >= 4947 here_old * mddev->chunk_sectors)) { 4948 /* Reading from the same stripe as writing to - bad */ 4949 printk(KERN_ERR "raid5: reshape_position too early for " 4950 "auto-recovery - aborting.\n"); 4951 return -EINVAL; 4952 } 4953 printk(KERN_INFO "raid5: reshape will continue\n"); 4954 /* OK, we should be able to continue; */ 4955 } else { 4956 BUG_ON(mddev->level != mddev->new_level); 4957 BUG_ON(mddev->layout != mddev->new_layout); 4958 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 4959 BUG_ON(mddev->delta_disks != 0); 4960 } 4961 4962 if (mddev->private == NULL) 4963 conf = setup_conf(mddev); 4964 else 4965 conf = mddev->private; 4966 4967 if (IS_ERR(conf)) 4968 return PTR_ERR(conf); 4969 4970 mddev->thread = conf->thread; 4971 conf->thread = NULL; 4972 mddev->private = conf; 4973 4974 /* 4975 * 0 for a fully functional array, 1 or 2 for a degraded array. 4976 */ 4977 list_for_each_entry(rdev, &mddev->disks, same_set) { 4978 if (rdev->raid_disk < 0) 4979 continue; 4980 if (test_bit(In_sync, &rdev->flags)) 4981 working_disks++; 4982 /* This disk is not fully in-sync. However if it 4983 * just stored parity (beyond the recovery_offset), 4984 * then we don't need to be concerned about the 4985 * array being dirty. 4986 * When reshape goes 'backwards', we never have 4987 * partially completed devices, so we only need 4988 * to worry about reshape going forwards. 4989 */ 4990 /* Hack because v0.91 doesn't store recovery_offset properly.
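 * (presumably because the transitional 0.91 format - 0.90 plus
 * in-progress reshape state - has nowhere to keep a per-device
 * recovery offset, so the code below simply assumes such a device is
 * current up to the reshape checkpoint)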
*/ 4991 if (mddev->major_version == 0 && 4992 mddev->minor_version > 90) 4993 rdev->recovery_offset = reshape_offset; 4994 4995 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n", 4996 rdev->raid_disk, working_disks, conf->prev_algo, 4997 conf->previous_raid_disks, conf->max_degraded, 4998 conf->algorithm, conf->raid_disks, 4999 only_parity(rdev->raid_disk, 5000 conf->prev_algo, 5001 conf->previous_raid_disks, 5002 conf->max_degraded), 5003 only_parity(rdev->raid_disk, 5004 conf->algorithm, 5005 conf->raid_disks, 5006 conf->max_degraded)); 5007 if (rdev->recovery_offset < reshape_offset) { 5008 /* We need to check old and new layout */ 5009 if (!only_parity(rdev->raid_disk, 5010 conf->algorithm, 5011 conf->raid_disks, 5012 conf->max_degraded)) 5013 continue; 5014 } 5015 if (!only_parity(rdev->raid_disk, 5016 conf->prev_algo, 5017 conf->previous_raid_disks, 5018 conf->max_degraded)) 5019 continue; 5020 dirty_parity_disks++; 5021 } 5022 5023 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) 5024 - working_disks); 5025 5026 if (mddev->degraded > conf->max_degraded) { 5027 printk(KERN_ERR "raid5: not enough operational devices for %s" 5028 " (%d/%d failed)\n", 5029 mdname(mddev), mddev->degraded, conf->raid_disks); 5030 goto abort; 5031 } 5032 5033 /* device size must be a multiple of chunk size */ 5034 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 5035 mddev->resync_max_sectors = mddev->dev_sectors; 5036 5037 if (mddev->degraded > dirty_parity_disks && 5038 mddev->recovery_cp != MaxSector) { 5039 if (mddev->ok_start_degraded) 5040 printk(KERN_WARNING 5041 "raid5: starting dirty degraded array: %s" 5042 " - data corruption possible.\n", 5043 mdname(mddev)); 5044 else { 5045 printk(KERN_ERR 5046 "raid5: cannot start dirty degraded array for %s\n", 5047 mdname(mddev)); 5048 goto abort; 5049 } 5050 } 5051 5052 if (mddev->degraded == 0) 5053 printk("raid5: raid level %d set %s active with %d out of %d" 5054 " devices, algorithm %d\n", conf->level, mdname(mddev), 5055 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 5056 mddev->new_layout); 5057 else 5058 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 5059 " out of %d devices, algorithm %d\n", conf->level, 5060 mdname(mddev), mddev->raid_disks - mddev->degraded, 5061 mddev->raid_disks, mddev->new_layout); 5062 5063 print_raid5_conf(conf); 5064 5065 if (conf->reshape_progress != MaxSector) { 5066 printk("...ok start reshape thread\n"); 5067 conf->reshape_safe = conf->reshape_progress; 5068 atomic_set(&conf->reshape_stripes, 0); 5069 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5070 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5071 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 5072 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5073 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 5074 "reshape"); 5075 } 5076 5077 /* read-ahead size must cover two whole stripes, which is 5078 * 2 * (number of data disks) * chunksize 5079 */ 5080 { 5081 int data_disks = conf->previous_raid_disks - conf->max_degraded; 5082 int stripe = data_disks * 5083 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 5084 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5085 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5086 } 5087 5088 /* Ok, everything is just fine now */ 5089 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 5090 printk(KERN_WARNING 5091 "raid5: failed to create sysfs attributes for %s\n", 5092 mdname(mddev)); 5093 5094 mddev->queue->queue_lock =
&conf->device_lock; 5095 5096 mddev->queue->unplug_fn = raid5_unplug_device; 5097 mddev->queue->backing_dev_info.congested_data = mddev; 5098 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5099 5100 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5101 5102 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 5103 chunk_size = mddev->chunk_sectors << 9; 5104 blk_queue_io_min(mddev->queue, chunk_size); 5105 blk_queue_io_opt(mddev->queue, chunk_size * 5106 (conf->raid_disks - conf->max_degraded)); 5107 5108 list_for_each_entry(rdev, &mddev->disks, same_set) 5109 disk_stack_limits(mddev->gendisk, rdev->bdev, 5110 rdev->data_offset << 9); 5111 5112 return 0; 5113abort: 5114 md_unregister_thread(mddev->thread); 5115 mddev->thread = NULL; 5116 if (conf) { 5117 print_raid5_conf(conf); 5118 free_conf(conf); 5119 } 5120 mddev->private = NULL; 5121 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 5122 return -EIO; 5123} 5124 5125 5126 5127static int stop(mddev_t *mddev) 5128{ 5129 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 5130 5131 md_unregister_thread(mddev->thread); 5132 mddev->thread = NULL; 5133 mddev->queue->backing_dev_info.congested_fn = NULL; 5134 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5135 free_conf(conf); 5136 mddev->private = &raid5_attrs_group; 5137 return 0; 5138} 5139 5140#ifdef DEBUG 5141static void print_sh(struct seq_file *seq, struct stripe_head *sh) 5142{ 5143 int i; 5144 5145 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 5146 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 5147 seq_printf(seq, "sh %llu, count %d.\n", 5148 (unsigned long long)sh->sector, atomic_read(&sh->count)); 5149 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 5150 for (i = 0; i < sh->disks; i++) { 5151 seq_printf(seq, "(cache%d: %p %ld) ", 5152 i, sh->dev[i].page, sh->dev[i].flags); 5153 } 5154 seq_printf(seq, "\n"); 5155} 5156 5157static void printall(struct seq_file *seq, raid5_conf_t *conf) 5158{ 5159 struct stripe_head *sh; 5160 struct hlist_node *hn; 5161 int i; 5162 5163 spin_lock_irq(&conf->device_lock); 5164 for (i = 0; i < NR_HASH; i++) { 5165 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 5166 if (sh->raid_conf != conf) 5167 continue; 5168 print_sh(seq, sh); 5169 } 5170 } 5171 spin_unlock_irq(&conf->device_lock); 5172} 5173#endif 5174 5175static void status(struct seq_file *seq, mddev_t *mddev) 5176{ 5177 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 5178 int i; 5179 5180 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 5181 mddev->chunk_sectors / 2, mddev->layout); 5182 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 5183 for (i = 0; i < conf->raid_disks; i++) 5184 seq_printf (seq, "%s", 5185 conf->disks[i].rdev && 5186 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 5187 seq_printf (seq, "]"); 5188#ifdef DEBUG 5189 seq_printf (seq, "\n"); 5190 printall(seq, conf); 5191#endif 5192} 5193 5194static void print_raid5_conf (raid5_conf_t *conf) 5195{ 5196 int i; 5197 struct disk_info *tmp; 5198 5199 printk("RAID5 conf printout:\n"); 5200 if (!conf) { 5201 printk("(conf==NULL)\n"); 5202 return; 5203 } 5204 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 5205 conf->raid_disks - conf->mddev->degraded); 5206 5207 for (i = 0; i < conf->raid_disks; i++) { 5208 char b[BDEVNAME_SIZE]; 5209 tmp = conf->disks + i; 5210 if (tmp->rdev) 5211 printk(" disk %d, o:%d, dev:%s\n", 5212 i, !test_bit(Faulty, &tmp->rdev->flags), 5213 bdevname(tmp->rdev->bdev,b)); 5214 } 5215} 5216 5217static int raid5_spare_active(mddev_t *mddev) 5218{ 5219 int i; 5220 raid5_conf_t *conf = mddev->private; 5221 struct disk_info *tmp; 5222 5223 for (i = 0; i < conf->raid_disks; i++) { 5224 tmp = conf->disks + i; 5225 if (tmp->rdev 5226 && !test_bit(Faulty, &tmp->rdev->flags) 5227 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5228 unsigned long flags; 5229 spin_lock_irqsave(&conf->device_lock, flags); 5230 mddev->degraded--; 5231 spin_unlock_irqrestore(&conf->device_lock, flags); 5232 } 5233 } 5234 print_raid5_conf(conf); 5235 return 0; 5236} 5237 5238static int raid5_remove_disk(mddev_t *mddev, int number) 5239{ 5240 raid5_conf_t *conf = mddev->private; 5241 int err = 0; 5242 mdk_rdev_t *rdev; 5243 struct disk_info *p = conf->disks + number; 5244 5245 print_raid5_conf(conf); 5246 rdev = p->rdev; 5247 if (rdev) { 5248 if (number >= conf->raid_disks && 5249 conf->reshape_progress == MaxSector) 5250 clear_bit(In_sync, &rdev->flags); 5251 5252 if (test_bit(In_sync, &rdev->flags) || 5253 atomic_read(&rdev->nr_pending)) { 5254 err = -EBUSY; 5255 goto abort; 5256 } 5257 /* Only remove non-faulty devices if recovery 5258 * isn't possible. 5259 */ 5260 if (!test_bit(Faulty, &rdev->flags) && 5261 mddev->degraded <= conf->max_degraded && 5262 number < conf->raid_disks) { 5263 err = -EBUSY; 5264 goto abort; 5265 } 5266 p->rdev = NULL; 5267 synchronize_rcu(); 5268 if (atomic_read(&rdev->nr_pending)) { 5269 /* lost the race, try later */ 5270 err = -EBUSY; 5271 p->rdev = rdev; 5272 } 5273 } 5274abort: 5275 5276 print_raid5_conf(conf); 5277 return err; 5278} 5279 5280static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 5281{ 5282 raid5_conf_t *conf = mddev->private; 5283 int err = -EEXIST; 5284 int disk; 5285 struct disk_info *p; 5286 int first = 0; 5287 int last = conf->raid_disks - 1; 5288 5289 if (mddev->degraded > conf->max_degraded) 5290 /* no point adding a device */ 5291 return -EINVAL; 5292 5293 if (rdev->raid_disk >= 0) 5294 first = last = rdev->raid_disk; 5295 5296 /* 5297 * find the disk ... but prefer rdev->saved_raid_disk 5298 * if possible. 5299 */ 5300 if (rdev->saved_raid_disk >= 0 && 5301 rdev->saved_raid_disk >= first && 5302 conf->disks[rdev->saved_raid_disk].rdev == NULL) 5303 disk = rdev->saved_raid_disk; 5304 else 5305 disk = first; 5306 for ( ; disk <= last ; disk++) 5307 if ((p=conf->disks + disk)->rdev == NULL) { 5308 clear_bit(In_sync, &rdev->flags); 5309 rdev->raid_disk = disk; 5310 err = 0; 5311 if (rdev->saved_raid_disk != disk) 5312 conf->fullsync = 1; 5313 rcu_assign_pointer(p->rdev, rdev); 5314 break; 5315 } 5316 print_raid5_conf(conf); 5317 return err; 5318} 5319 5320static int raid5_resize(mddev_t *mddev, sector_t sectors) 5321{ 5322 /* no resync is happening, and there is enough space 5323 * on all devices, so we can resize. 
5324 * We need to make sure resync covers any new space. 5325 * If the array is shrinking we should possibly wait until 5326 * any io in the removed space completes, but it hardly seems 5327 * worth it. 5328 */ 5329 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 5330 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 5331 mddev->raid_disks)); 5332 if (mddev->array_sectors > 5333 raid5_size(mddev, sectors, mddev->raid_disks)) 5334 return -EINVAL; 5335 set_capacity(mddev->gendisk, mddev->array_sectors); 5336 mddev->changed = 1; 5337 revalidate_disk(mddev->gendisk); 5338 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 5339 mddev->recovery_cp = mddev->dev_sectors; 5340 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5341 } 5342 mddev->dev_sectors = sectors; 5343 mddev->resync_max_sectors = sectors; 5344 return 0; 5345} 5346 5347static int check_stripe_cache(mddev_t *mddev) 5348{ 5349 /* Can only proceed if there are plenty of stripe_heads. 5350 * We need a minimum of one full stripe, and for sensible progress 5351 * it is best to have about 4 times that. 5352 * If we require 4 times, then the default 256 4K stripe_heads will 5353 * allow for chunk sizes up to 256K, which is probably OK. 5354 * If the chunk size is greater, user-space should request more 5355 * stripe_heads first. 5356 */ 5357 raid5_conf_t *conf = mddev->private; 5358 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 5359 > conf->max_nr_stripes || 5360 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 5361 > conf->max_nr_stripes) { 5362 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 5363 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 5364 / STRIPE_SIZE)*4); 5365 return 0; 5366 } 5367 return 1; 5368} 5369 5370static int check_reshape(mddev_t *mddev) 5371{ 5372 raid5_conf_t *conf = mddev->private; 5373 5374 if (mddev->delta_disks == 0 && 5375 mddev->new_layout == mddev->layout && 5376 mddev->new_chunk_sectors == mddev->chunk_sectors) 5377 return 0; /* nothing to do */ 5378 if (mddev->bitmap) 5379 /* Cannot grow a bitmap yet */ 5380 return -EBUSY; 5381 if (mddev->degraded > conf->max_degraded) 5382 return -EINVAL; 5383 if (mddev->delta_disks < 0) { 5384 /* We might be able to shrink, but the devices must 5385 * be made bigger first. 5386 * For raid6, 4 is the minimum number of devices. 5387 * Otherwise 2 is the minimum. 5388 */ 5389 int min = 2; 5390 if (mddev->level == 6) 5391 min = 4; 5392 if (mddev->raid_disks + mddev->delta_disks < min) 5393 return -EINVAL; 5394 } 5395 5396 if (!check_stripe_cache(mddev)) 5397 return -ENOSPC; 5398 5399 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 5400} 5401 5402static int raid5_start_reshape(mddev_t *mddev) 5403{ 5404 raid5_conf_t *conf = mddev->private; 5405 mdk_rdev_t *rdev; 5406 int spares = 0; 5407 int added_devices = 0; 5408 unsigned long flags; 5409 5410 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5411 return -EBUSY; 5412 5413 if (!check_stripe_cache(mddev)) 5414 return -ENOSPC; 5415 5416 list_for_each_entry(rdev, &mddev->disks, same_set) 5417 if (rdev->raid_disk < 0 && 5418 !test_bit(Faulty, &rdev->flags)) 5419 spares++; 5420 5421 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5422 /* Not enough devices even to make a degraded array 5423 * of that size 5424 */ 5425 return -EINVAL; 5426 5427 /* Refuse to reduce size of the array. Any reductions in 5428 * array size must be through explicit setting of array_size 5429 * attribute.
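 * i.e. write the smaller size to the md array_size attribute first
 * (with a recent mdadm, "mdadm --grow --array-size=N") and only then
 * reduce the number of devices; the check below compares raid5_size()
 * for the new disk count against mddev->array_sectors.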
5430 */ 5431 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 5432 < mddev->array_sectors) { 5433 printk(KERN_ERR "md: %s: array size must be reduced " 5434 "before number of disks\n", mdname(mddev)); 5435 return -EINVAL; 5436 } 5437 5438 atomic_set(&conf->reshape_stripes, 0); 5439 spin_lock_irq(&conf->device_lock); 5440 conf->previous_raid_disks = conf->raid_disks; 5441 conf->raid_disks += mddev->delta_disks; 5442 conf->prev_chunk_sectors = conf->chunk_sectors; 5443 conf->chunk_sectors = mddev->new_chunk_sectors; 5444 conf->prev_algo = conf->algorithm; 5445 conf->algorithm = mddev->new_layout; 5446 if (mddev->delta_disks < 0) 5447 conf->reshape_progress = raid5_size(mddev, 0, 0); 5448 else 5449 conf->reshape_progress = 0; 5450 conf->reshape_safe = conf->reshape_progress; 5451 conf->generation++; 5452 spin_unlock_irq(&conf->device_lock); 5453 5454 /* Add some new drives, as many as will fit. 5455 * We know there are enough to make the newly sized array work. 5456 */ 5457 list_for_each_entry(rdev, &mddev->disks, same_set) 5458 if (rdev->raid_disk < 0 && 5459 !test_bit(Faulty, &rdev->flags)) { 5460 if (raid5_add_disk(mddev, rdev) == 0) { 5461 char nm[20]; 5462 if (rdev->raid_disk >= conf->previous_raid_disks) { 5463 set_bit(In_sync, &rdev->flags); 5464 added_devices++; 5465 } else 5466 rdev->recovery_offset = 0; 5467 sprintf(nm, "rd%d", rdev->raid_disk); 5468 if (sysfs_create_link(&mddev->kobj, 5469 &rdev->kobj, nm)) 5470 printk(KERN_WARNING 5471 "raid5: failed to create " 5472 "link %s for %s\n", 5473 nm, mdname(mddev)); 5474 } else 5475 break; 5476 } 5477 5478 /* When a reshape changes the number of devices, ->degraded 5479 * is measured against the larger of the pre and post number of 5480 * devices. */ 5481 if (mddev->delta_disks > 0) { 5482 spin_lock_irqsave(&conf->device_lock, flags); 5483 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) 5484 - added_devices; 5485 spin_unlock_irqrestore(&conf->device_lock, flags); 5486 } 5487 mddev->raid_disks = conf->raid_disks; 5488 mddev->reshape_position = conf->reshape_progress; 5489 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5490 5491 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5492 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5493 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 5494 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5495 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 5496 "reshape"); 5497 if (!mddev->sync_thread) { 5498 mddev->recovery = 0; 5499 spin_lock_irq(&conf->device_lock); 5500 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 5501 conf->reshape_progress = MaxSector; 5502 spin_unlock_irq(&conf->device_lock); 5503 return -EAGAIN; 5504 } 5505 conf->reshape_checkpoint = jiffies; 5506 md_wakeup_thread(mddev->sync_thread); 5507 md_new_event(mddev); 5508 return 0; 5509} 5510 5511/* This is called from the reshape thread and should make any 5512 * changes needed in 'conf' 5513 */ 5514static void end_reshape(raid5_conf_t *conf) 5515{ 5516 5517 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 5518 5519 spin_lock_irq(&conf->device_lock); 5520 conf->previous_raid_disks = conf->raid_disks; 5521 conf->reshape_progress = MaxSector; 5522 spin_unlock_irq(&conf->device_lock); 5523 wake_up(&conf->wait_for_overlap); 5524 5525 /* read-ahead size must cover two whole stripes, which is 5526 * 2 * (number of data disks) * chunksize 5527 */ 5528 { 5529 int data_disks = conf->raid_disks - conf->max_degraded; 5530 int stripe = data_disks *
((conf->chunk_sectors << 9) 5531 / PAGE_SIZE); 5532 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5533 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5534 } 5535 } 5536} 5537 5538/* This is called from the raid5d thread with mddev_lock held. 5539 * It makes config changes to the device. 5540 */ 5541static void raid5_finish_reshape(mddev_t *mddev) 5542{ 5543 raid5_conf_t *conf = mddev->private; 5544 5545 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5546 5547 if (mddev->delta_disks > 0) { 5548 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5549 set_capacity(mddev->gendisk, mddev->array_sectors); 5550 mddev->changed = 1; 5551 revalidate_disk(mddev->gendisk); 5552 } else { 5553 int d; 5554 mddev->degraded = conf->raid_disks; 5555 for (d = 0; d < conf->raid_disks ; d++) 5556 if (conf->disks[d].rdev && 5557 test_bit(In_sync, 5558 &conf->disks[d].rdev->flags)) 5559 mddev->degraded--; 5560 for (d = conf->raid_disks ; 5561 d < conf->raid_disks - mddev->delta_disks; 5562 d++) { 5563 mdk_rdev_t *rdev = conf->disks[d].rdev; 5564 if (rdev && raid5_remove_disk(mddev, d) == 0) { 5565 char nm[20]; 5566 sprintf(nm, "rd%d", rdev->raid_disk); 5567 sysfs_remove_link(&mddev->kobj, nm); 5568 rdev->raid_disk = -1; 5569 } 5570 } 5571 } 5572 mddev->layout = conf->algorithm; 5573 mddev->chunk_sectors = conf->chunk_sectors; 5574 mddev->reshape_position = MaxSector; 5575 mddev->delta_disks = 0; 5576 } 5577} 5578 5579static void raid5_quiesce(mddev_t *mddev, int state) 5580{ 5581 raid5_conf_t *conf = mddev->private; 5582 5583 switch(state) { 5584 case 2: /* resume for a suspend */ 5585 wake_up(&conf->wait_for_overlap); 5586 break; 5587 5588 case 1: /* stop all writes */ 5589 spin_lock_irq(&conf->device_lock); 5590 /* '2' tells resync/reshape to pause so that all 5591 * active stripes can drain 5592 */ 5593 conf->quiesce = 2; 5594 wait_event_lock_irq(conf->wait_for_stripe, 5595 atomic_read(&conf->active_stripes) == 0 && 5596 atomic_read(&conf->active_aligned_reads) == 0, 5597 conf->device_lock, /* nothing */); 5598 conf->quiesce = 1; 5599 spin_unlock_irq(&conf->device_lock); 5600 /* allow reshape to continue */ 5601 wake_up(&conf->wait_for_overlap); 5602 break; 5603 5604 case 0: /* re-enable writes */ 5605 spin_lock_irq(&conf->device_lock); 5606 conf->quiesce = 0; 5607 wake_up(&conf->wait_for_stripe); 5608 wake_up(&conf->wait_for_overlap); 5609 spin_unlock_irq(&conf->device_lock); 5610 break; 5611 } 5612} 5613 5614 5615static void *raid5_takeover_raid1(mddev_t *mddev) 5616{ 5617 int chunksect; 5618 5619 if (mddev->raid_disks != 2 || 5620 mddev->degraded > 1) 5621 return ERR_PTR(-EINVAL); 5622 5623 /* Should check if there are write-behind devices? 
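 * The loop below halves the candidate chunk size until it divides the
 * array size evenly: e.g. (illustrative) an array of 1000 sectors
 * fails at 128, 64, 32 and 16 sectors and settles on 8 sectors
 * (4KiB), the smallest value the STRIPE_SIZE check afterwards will
 * accept on 4KiB pages.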
5615static void *raid5_takeover_raid1(mddev_t *mddev)
5616{
5617 int chunksect;
5618
5619 if (mddev->raid_disks != 2 ||
5620 mddev->degraded > 1)
5621 return ERR_PTR(-EINVAL);
5622
5623 /* Should check if there are write-behind devices? */
5624
5625 chunksect = 64*2; /* 64K by default */
5626
5627 /* The array must be an exact multiple of chunksize */
5628 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5629 chunksect >>= 1;
5630
5631 if ((chunksect<<9) < STRIPE_SIZE)
5632 /* array size does not allow a suitable chunk size */
5633 return ERR_PTR(-EINVAL);
5634
5635 mddev->new_level = 5;
5636 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5637 mddev->new_chunk_sectors = chunksect;
5638
5639 return setup_conf(mddev);
5640}
5641
5642static void *raid5_takeover_raid6(mddev_t *mddev)
5643{
5644 int new_layout;
5645
5646 switch (mddev->layout) {
5647 case ALGORITHM_LEFT_ASYMMETRIC_6:
5648 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5649 break;
5650 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5651 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5652 break;
5653 case ALGORITHM_LEFT_SYMMETRIC_6:
5654 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5655 break;
5656 case ALGORITHM_RIGHT_SYMMETRIC_6:
5657 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5658 break;
5659 case ALGORITHM_PARITY_0_6:
5660 new_layout = ALGORITHM_PARITY_0;
5661 break;
5662 case ALGORITHM_PARITY_N:
5663 new_layout = ALGORITHM_PARITY_N;
5664 break;
5665 default:
5666 return ERR_PTR(-EINVAL);
5667 }
5668 mddev->new_level = 5;
5669 mddev->new_layout = new_layout;
5670 mddev->delta_disks = -1;
5671 mddev->raid_disks -= 1;
5672 return setup_conf(mddev);
5673}
5674
5675
5676static int raid5_check_reshape(mddev_t *mddev)
5677{
5678 /* For a 2-drive array, the layout and chunk size can be changed
5679 * immediately as no restriping is needed.
5680 * For larger arrays we record the new value - after validation -
5681 * to be used by a reshape pass.
5682 */
5683 raid5_conf_t *conf = mddev->private;
5684 int new_chunk = mddev->new_chunk_sectors;
5685
5686 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5687 return -EINVAL;
5688 if (new_chunk > 0) {
5689 if (!is_power_of_2(new_chunk))
5690 return -EINVAL;
5691 if (new_chunk < (PAGE_SIZE>>9))
5692 return -EINVAL;
5693 if (mddev->array_sectors & (new_chunk-1))
5694 /* not factor of array size */
5695 return -EINVAL;
5696 }
5697
5698 /* They look valid */
5699
5700 if (mddev->raid_disks == 2) {
5701 /* can make the change immediately */
5702 if (mddev->new_layout >= 0) {
5703 conf->algorithm = mddev->new_layout;
5704 mddev->layout = mddev->new_layout;
5705 }
5706 if (new_chunk > 0) {
5707 conf->chunk_sectors = new_chunk;
5708 mddev->chunk_sectors = new_chunk;
5709 }
5710 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5711 md_wakeup_thread(mddev->thread);
5712 }
5713 return check_reshape(mddev);
5714}
5715
5716static int raid6_check_reshape(mddev_t *mddev)
5717{
5718 int new_chunk = mddev->new_chunk_sectors;
5719
5720 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5721 return -EINVAL;
5722 if (new_chunk > 0) {
5723 if (!is_power_of_2(new_chunk))
5724 return -EINVAL;
5725 if (new_chunk < (PAGE_SIZE >> 9))
5726 return -EINVAL;
5727 if (mddev->array_sectors & (new_chunk-1))
5728 /* not factor of array size */
5729 return -EINVAL;
5730 }
5731
5732 /* They look valid */
5733 return check_reshape(mddev);
5734}
5735
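/*
 * For illustration only: the chunk-size checks shared by
 * raid5_check_reshape() and raid6_check_reshape() above, pulled out
 * into a small user-space program.  Because the chunk must be a power
 * of two, "is a factor of the array size" reduces to a mask test, the
 * same (n & (n-1)) trick that is_power_of_2() relies on.  PAGE_SIZE is
 * assumed to be 4096 here.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL

static int chunk_valid(unsigned long long array_sectors, unsigned int new_chunk)
{
	if (new_chunk & (new_chunk - 1))		/* not a power of 2 */
		return 0;
	if (new_chunk < (EXAMPLE_PAGE_SIZE >> 9))	/* below one page */
		return 0;
	if (array_sectors & (new_chunk - 1))	/* not factor of array size */
		return 0;
	return 1;
}

int main(void)
{
	unsigned long long array_sectors = 10485760ULL;	/* 5 GiB of sectors */

	printf("1024-sector chunk: %s\n",
	       chunk_valid(array_sectors, 1024) ? "ok" : "rejected");
	printf("1000-sector chunk: %s\n",
	       chunk_valid(array_sectors, 1000) ? "ok" : "rejected");
	return 0;
}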
5736static void *raid5_takeover(mddev_t *mddev)
5737{
5738 /* raid5 can take over:
5739 * raid0 - if all devices are the same - make it a raid4 layout
5740 * raid1 - if there are two drives. We need to know the chunk size
5741 * raid4 - trivial - just use a raid4 layout.
5742 * raid6 - Providing it is a *_6 layout
5743 */
5744
5745 if (mddev->level == 1)
5746 return raid5_takeover_raid1(mddev);
5747 if (mddev->level == 4) {
5748 mddev->new_layout = ALGORITHM_PARITY_N;
5749 mddev->new_level = 5;
5750 return setup_conf(mddev);
5751 }
5752 if (mddev->level == 6)
5753 return raid5_takeover_raid6(mddev);
5754
5755 return ERR_PTR(-EINVAL);
5756}
5757
5758
5759static struct mdk_personality raid5_personality;
5760
5761static void *raid6_takeover(mddev_t *mddev)
5762{
5763 /* Currently can only take over a raid5. We map the
5764 * personality to an equivalent raid6 personality
5765 * with the Q block at the end.
5766 */
5767 int new_layout;
5768
5769 if (mddev->pers != &raid5_personality)
5770 return ERR_PTR(-EINVAL);
5771 if (mddev->degraded > 1)
5772 return ERR_PTR(-EINVAL);
5773 if (mddev->raid_disks > 253)
5774 return ERR_PTR(-EINVAL);
5775 if (mddev->raid_disks < 3)
5776 return ERR_PTR(-EINVAL);
5777
5778 switch (mddev->layout) {
5779 case ALGORITHM_LEFT_ASYMMETRIC:
5780 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5781 break;
5782 case ALGORITHM_RIGHT_ASYMMETRIC:
5783 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5784 break;
5785 case ALGORITHM_LEFT_SYMMETRIC:
5786 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5787 break;
5788 case ALGORITHM_RIGHT_SYMMETRIC:
5789 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5790 break;
5791 case ALGORITHM_PARITY_0:
5792 new_layout = ALGORITHM_PARITY_0_6;
5793 break;
5794 case ALGORITHM_PARITY_N:
5795 new_layout = ALGORITHM_PARITY_N;
5796 break;
5797 default:
5798 return ERR_PTR(-EINVAL);
5799 }
5800 mddev->new_level = 6;
5801 mddev->new_layout = new_layout;
5802 mddev->delta_disks = 1;
5803 mddev->raid_disks += 1;
5804 return setup_conf(mddev);
5805}
5806
5807
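/*
 * For illustration only: takeovers such as raid6_takeover() above are
 * normally requested from user space (for example by
 * "mdadm --grow /dev/md0 --level=6"), which boils down to writing the
 * new level name to the array's sysfs "level" attribute.  A hedged
 * sketch, assuming an existing array md0 and sufficient privileges:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/level", "w");

	if (!f) {
		perror("open level attribute");
		return 1;
	}
	/* md core validates the request and, on success, invokes the new
	 * personality's ->takeover() method - raid6_takeover() for "raid6". */
	if (fputs("raid6", f) == EOF)
		perror("write level");
	return fclose(f) ? 1 : 0;
}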
5808static struct mdk_personality raid6_personality =
5809{
5810 .name = "raid6",
5811 .level = 6,
5812 .owner = THIS_MODULE,
5813 .make_request = make_request,
5814 .run = run,
5815 .stop = stop,
5816 .status = status,
5817 .error_handler = error,
5818 .hot_add_disk = raid5_add_disk,
5819 .hot_remove_disk = raid5_remove_disk,
5820 .spare_active = raid5_spare_active,
5821 .sync_request = sync_request,
5822 .resize = raid5_resize,
5823 .size = raid5_size,
5824 .check_reshape = raid6_check_reshape,
5825 .start_reshape = raid5_start_reshape,
5826 .finish_reshape = raid5_finish_reshape,
5827 .quiesce = raid5_quiesce,
5828 .takeover = raid6_takeover,
5829};
5830static struct mdk_personality raid5_personality =
5831{
5832 .name = "raid5",
5833 .level = 5,
5834 .owner = THIS_MODULE,
5835 .make_request = make_request,
5836 .run = run,
5837 .stop = stop,
5838 .status = status,
5839 .error_handler = error,
5840 .hot_add_disk = raid5_add_disk,
5841 .hot_remove_disk = raid5_remove_disk,
5842 .spare_active = raid5_spare_active,
5843 .sync_request = sync_request,
5844 .resize = raid5_resize,
5845 .size = raid5_size,
5846 .check_reshape = raid5_check_reshape,
5847 .start_reshape = raid5_start_reshape,
5848 .finish_reshape = raid5_finish_reshape,
5849 .quiesce = raid5_quiesce,
5850 .takeover = raid5_takeover,
5851};
5852
5853static struct mdk_personality raid4_personality =
5854{
5855 .name = "raid4",
5856 .level = 4,
5857 .owner = THIS_MODULE,
5858 .make_request = make_request,
5859 .run = run,
5860 .stop = stop,
5861 .status = status,
5862 .error_handler = error,
5863 .hot_add_disk = raid5_add_disk,
5864 .hot_remove_disk = raid5_remove_disk,
5865 .spare_active = raid5_spare_active,
5866 .sync_request = sync_request,
5867 .resize = raid5_resize,
5868 .size = raid5_size,
5869 .check_reshape = raid5_check_reshape,
5870 .start_reshape = raid5_start_reshape,
5871 .finish_reshape = raid5_finish_reshape,
5872 .quiesce = raid5_quiesce,
5873};
5874
5875static int __init raid5_init(void)
5876{
5877 register_md_personality(&raid6_personality);
5878 register_md_personality(&raid5_personality);
5879 register_md_personality(&raid4_personality);
5880 return 0;
5881}
5882
5883static void raid5_exit(void)
5884{
5885 unregister_md_personality(&raid6_personality);
5886 unregister_md_personality(&raid5_personality);
5887 unregister_md_personality(&raid4_personality);
5888}
5889
5890module_init(raid5_init);
5891module_exit(raid5_exit);
5892MODULE_LICENSE("GPL");
5893MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
5894MODULE_ALIAS("md-personality-4"); /* RAID5 */
5895MODULE_ALIAS("md-raid5");
5896MODULE_ALIAS("md-raid4");
5897MODULE_ALIAS("md-level-5");
5898MODULE_ALIAS("md-level-4");
5899MODULE_ALIAS("md-personality-8"); /* RAID6 */
5900MODULE_ALIAS("md-raid6");
5901MODULE_ALIAS("md-level-6");
5902
5903/* This used to be two separate modules, they were: */
5904MODULE_ALIAS("raid5");
5905MODULE_ALIAS("raid6");
5906
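/*
 * For illustration only: a hypothetical skeleton showing the same
 * registration pattern the personalities above use.  It is wrapped in
 * "#if 0" because it is a sketch, not part of this driver: the mandatory
 * handlers (make_request, run, stop, ...) are omitted and the level
 * number is made up.
 */
#if 0
static struct mdk_personality example_personality =
{
	.name		= "example",
	.level		= 123,		/* hypothetical level number */
	.owner		= THIS_MODULE,
	/* .make_request, .run, .stop, .status, ... as in raid5_personality */
};

static int __init example_init(void)
{
	return register_md_personality(&example_personality);
}

static void example_exit(void)
{
	unregister_md_personality(&example_personality);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
/* an "md-level-<n>" alias lets md autoload the module for that level */
MODULE_ALIAS("md-level-123");
#endif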