raid5.c revision ec32a2bd35bd6b933a5db6542c48210ce069a376
/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* bitwise OR: a logical '||' here would collapse the upper
	 * 16-bit count to 0 or 1 */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
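/*
 * Illustrative worked example (not part of the driver): how the two
 * 16-bit counts share bio->bi_phys_segments.  With 3 active stripes
 * and 2 processed stripes the field holds (2 << 16) | 3 == 0x00020003,
 * so raid5_bi_phys_segments() returns 3 and raid5_bi_hw_segments()
 * returns 2.  The r5_example_* helper below is a sketch only.
 */
static inline unsigned int r5_example_pack_segments(unsigned int active,
						    unsigned int processed)
{
	/* both counts must fit in 16 bits */
	return (processed << 16) | (active & 0xffff);
}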
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid6, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot;

	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	slot = (*count)++;
	return slot;
}
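/*
 * Illustrative sketch (not part of the driver): visiting every device
 * of a stripe in syndrome order.  For a 5-device md-layout stripe with
 * pd_idx == 3 and qd_idx == 4, raid6_d0() returns 0 and the walk maps
 * devices 0,1,2 to slots 0,1,2, the P device to slot 3
 * (== syndrome_disks) and the Q device to slot 4.
 */
static inline void r5_example_walk_syndrome(struct stripe_head *sh)
{
	int syndrome_disks = sh->ddf_layout ? sh->disks : sh->disks - 2;
	int count = 0;
	int d0 = raid6_d0(sh);
	int i = d0;

	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		pr_debug("device %d -> slot %d\n", i, slot);
		i = raid6_next_disk(i, sh->disks);
	} while (i != d0);
}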
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
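/*
 * Illustrative worked example (not part of the driver): which hash
 * bucket a stripe lands in.  With 4KiB pages STRIPE_SHIFT is 3 and,
 * on a 64-bit build, NR_HASH is 512, so a stripe at sector 80 hashes
 * to bucket (80 >> 3) & HASH_MASK == 10.
 */
static inline struct hlist_head *r5_example_bucket(raid5_conf_t *conf,
						   sector_t sector)
{
	return stripe_hash(conf, sector);
}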
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->disks == disks)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock)
{
	struct stripe_head *sh;
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, disks);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
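/*
 * Illustrative sketch (not part of the driver): the canonical way a
 * caller obtains and returns a stripe.  get_active_stripe() takes a
 * reference (possibly sleeping unless 'noblock' is set);
 * release_stripe() drops it and lets __release_stripe() requeue the
 * stripe on the appropriate list.
 */
static inline void r5_example_touch_stripe(raid5_conf_t *conf,
					   sector_t sector)
{
	struct stripe_head *sh = get_active_stripe(conf, sector, 0, 0);

	if (sh) {
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
	}
}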
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
					b_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
			else
				tx = async_memcpy(bio_page, page, b_offset,
					page_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_biofill, sh);
}

static void ops_complete_compute5(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
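/*
 * Illustrative worked example (not part of the driver) of the offset
 * arithmetic in async_copy_data(): a bio starting 2 sectors past the
 * stripe page begins copying at byte 1024 of the page; a bio starting
 * 2 sectors before it carries page_offset == -1024, so the first 1024
 * bytes of the bio are skipped (b_offset) before the copy starts at
 * byte 0 of the page.
 */
static inline int r5_example_page_offset(sector_t bio_sector,
					 sector_t dev_sector)
{
	if (bio_sector >= dev_sector)
		return (signed)(bio_sector - dev_sector) * 512;
	return (signed)(dev_sector - bio_sector) * -512;
}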
static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			0, NULL, ops_complete_compute5, sh);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL,
			ops_complete_compute5, sh);

	return tx;
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int count = 0, pd_idx = sh->pd_idx, i;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
		ops_complete_prexor, sh);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_postxor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks, i, pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (dev->written || i == pd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	if (unlikely(count == 1)) {
		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
	} else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
}
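/*
 * Illustrative sketch (not part of the driver): which blocks feed the
 * parity xor in ops_run_postxor().  In the read-modify-write (prexor)
 * case only the freshly written blocks are xor'd into the existing
 * parity page (ASYNC_TX_XOR_DROP_DST); in the reconstruct-write case
 * every data block is xor'd into a zeroed parity page
 * (ASYNC_TX_XOR_ZERO_DST).
 */
static inline int r5_example_postxor_srcs(struct stripe_head *sh, int prexor,
					  struct page **srcs)
{
	int i, count = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (prexor ? (dev->written != NULL) : (i != sh->pd_idx))
			srcs[count++] = dev->page;
	}
	return count;
}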
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (i != pd_idx)
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

	atomic_inc(&sh->count);
	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_check, sh);
}

static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;

	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		tx = ops_run_compute5(sh);
		/* terminate the chain if postxor is not set to be run */
		if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
		ops_run_postxor(sh, tx);

	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}
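/*
 * Illustrative sketch (not part of the driver): how a caller would
 * request a chained compute followed by a parity rebuild.  The flag
 * bits select which stages raid5_run_ops() strings together on one
 * dma_async_tx_descriptor chain; the stripe must already be prepared
 * (e.g. sh->ops.target set and R5_Wantcompute flagged), as
 * handle_stripe does before calling in.
 */
static inline void r5_example_request_ops(struct stripe_head *sh)
{
	unsigned long ops_request = 0;

	set_bit(STRIPE_OP_COMPUTE_BLK, &ops_request);
	set_bit(STRIPE_OP_POSTXOR, &ops_request);
	raid5_run_ops(sh, ops_request);
}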
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
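/*
 * Illustrative worked example (not part of the driver): the slab
 * object size computed in grow_stripes().  struct stripe_head embeds
 * one struct r5dev, so an N-disk array needs
 * sizeof(struct stripe_head) + (N - 1) * sizeof(struct r5dev) bytes
 * per stripe; an 8-disk array, for example, adds 7 extra r5dev slots.
 */
static inline size_t r5_example_sh_size(int devs)
{
	return sizeof(struct stripe_head) + (devs - 1) * sizeof(struct r5dev);
}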
#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
#endif

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;


	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}


static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block(struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
			BUG();
		}
		break;
	case 6:

		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * ordering of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;


		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
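/*
 * Illustrative worked example (not part of the driver): the mapping
 * above for 4 disks, RAID-5 left-symmetric and 8-sector chunks.
 * Array sector 100 gives chunk_offset == 4, chunk_number == 12,
 * stripe == 4 and raw dd_idx == 0; pd_idx == data_disks -
 * stripe % raid_disks == 3, dd_idx becomes (3 + 1 + 0) % 4 == 0, and
 * the device sector is stripe * 8 + 4 == 36.  The helper below
 * mirrors only ALGORITHM_LEFT_SYMMETRIC and uses plain unsigned long
 * arithmetic instead of sector_div(), purely for illustration.
 */
static inline unsigned long r5_example_map_ls(unsigned long r_sector,
					      int raid_disks,
					      int sectors_per_chunk,
					      int *dd_idx, int *pd_idx)
{
	int data_disks = raid_disks - 1;
	unsigned long chunk_number = r_sector / sectors_per_chunk;
	unsigned int chunk_offset = r_sector % sectors_per_chunk;
	unsigned long stripe = chunk_number / data_disks;

	*pd_idx = data_disks - (int)(stripe % raid_disks);
	*dd_idx = (*pd_idx + 1 + (int)(chunk_number % data_disks)) % raid_disks;
	return stripe * sectors_per_chunk + chunk_offset;
}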
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;


	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     (raid_disks != conf->raid_disks),
				     &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
	    || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
/*
 * Copy data between a page in the stripe cache, and one or more bion
 * The page could align with the middle of the bio, or there could be
 * several bion, each with several bio_vecs, which cover part of the page
 * Multiple bion are linked together on bi_next.  There may be extras
 * at the end of this list.  We ignore them.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}

#define check_xor()	do {						  \
				if (count == MAX_XOR_BLOCKS) {		  \
				xor_blocks(count, STRIPE_SIZE, dest, ptr);\
				count = 0;				  \
			   }						  \
			} while(0)
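/*
 * Illustrative sketch (not part of the driver): how check_xor() keeps
 * the pointer batch within what xor_blocks() accepts.  Source pages
 * are accumulated into ptr[] and flushed into dest every
 * MAX_XOR_BLOCKS entries; a final partial batch is flushed after the
 * loop.
 */
static inline void r5_example_xor_pages(void *dest, struct page **pages,
					int nr)
{
	void *ptr[MAX_XOR_BLOCKS];
	int i, count = 0;

	for (i = 0; i < nr; i++) {
		ptr[count++] = page_address(pages[i]);
		check_xor();
	}
	if (count)
		xor_blocks(count, STRIPE_SIZE, dest, ptr);
}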
static void compute_parity6(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[syndrome_disks+2];

	pd_idx = sh->pd_idx;
	qd_idx = sh->qd_idx;
	d0_idx = raid6_d0(sh);

	pr_debug("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		for (i= disks; i-- ;)
			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	/* Note that unlike RAID-5, the ordering of the disks matters greatly.*/

	for (i = 0; i < disks; i++)
		ptrs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		ptrs[slot] = page_address(sh->dev[i].page);
		if (slot < syndrome_disks &&
		    !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
			printk(KERN_ERR "block %d/%d not uptodate "
			       "on parity calc\n", i, count);
			BUG();
		}

		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}


/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
	int i, count, disks = sh->disks;
	void *ptr[MAX_XOR_BLOCKS], *dest, *p;
	int qd_idx = sh->qd_idx;

	pr_debug("compute_block_1, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	if ( dd_idx == qd_idx ) {
		/* We're actually computing the Q drive */
		compute_parity6(sh, UPDATE_PARITY);
	} else {
		dest = page_address(sh->dev[dd_idx].page);
		if (!nozero) memset(dest, 0, STRIPE_SIZE);
		count = 0;
		for (i = disks ; i--; ) {
			if (i == dd_idx || i == qd_idx)
				continue;
			p = page_address(sh->dev[i].page);
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
				ptr[count++] = p;
			else
				printk("compute_block() %d, stripe %llu, %d"
				       " not present\n", dd_idx,
				       (unsigned long long)sh->sector, i);

			check_xor();
		}
		if (count)
			xor_blocks(count, STRIPE_SIZE, dest, ptr);
		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
	}
}

/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[syndrome_disks+2];

	for (i = 0; i < disks ; i++)
		ptrs[i] = (void *)raid6_empty_zero_page;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		ptrs[slot] = page_address(sh->dev[i].page);

		if (i == dd_idx1)
			faila = slot;
		if (i == dd_idx2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	BUG_ON(faila == failb);
	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }

	pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
		 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
		 faila, failb);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			compute_parity6(sh, UPDATE_PARITY);
			return;
		} else {
			/* We're missing D+Q; recompute D from P */
			compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
					     dd_idx2 : dd_idx1),
					0);
			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
			return;
		}
	}

	/* We're missing D+P or D+D; */
	if (failb == syndrome_disks) {
		/* We're missing D+P. */
		raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
	} else {
		/* We're missing D+D. */
		raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
				  ptrs);
	}

	/* Both the above update both missing blocks */
	set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
}
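/*
 * Illustrative sketch (not part of the driver): the recovery choice
 * made in compute_block_2() once the failed slots are known.  With
 * slots 0..syndrome_disks-1 holding data, slot syndrome_disks holding
 * P and slot syndrome_disks+1 holding Q (and faila < failb):
 */
static inline const char *r5_example_recovery_kind(int faila, int failb,
						   int syndrome_disks)
{
	if (failb == syndrome_disks + 1)
		return faila == syndrome_disks ?
			"P+Q: regenerate both parities" :
			"D+Q: compute D from P, then redo Q";
	if (failb == syndrome_disks)
		return "D+P: raid6_datap_recov()";
	return "D+D: raid6_2data_recov()";
}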
static void
schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + 1 == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&sh->raid_conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
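/*
 * Illustrative sketch (not part of the driver): the two op chains
 * schedule_reconstruction5() can request.  A reconstruct-write (rcw)
 * drains the new data and rebuilds parity from all data blocks; a
 * read-modify-write first subtracts the old data from the parity
 * (prexor), then drains and re-adds.
 */
static inline void r5_example_reconstruct_flags(struct stripe_head_state *s,
						int rcw)
{
	if (rcw) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
	} else {
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
	}
}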
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
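/*
 * Illustrative worked example (not part of the driver) of the
 * coverage scan in add_stripe_bio(): with STRIPE_SECTORS == 8, two
 * sorted write bios covering sectors [s, s+4) and [s+4, s+8) advance
 * 'sector' from s to s+4 and then to s+8, so the page is fully
 * covered and R5_OVERWRITE can be set; a gap in the chain would leave
 * 'sector' short of s + STRIPE_SECTORS and the flag clear.
 */
static inline int r5_example_covered(sector_t dev_sector, struct bio *towrite)
{
	sector_t sector = dev_sector;
	struct bio *bi;

	for (bi = towrite;
	     sector < dev_sector + STRIPE_SECTORS &&
		     bi && bi->bi_sector <= sector;
	     bi = r5_next_bio(bi, dev_sector))
		if (bi->bi_sector + (bi->bi_size >> 9) >= sector)
			sector = bi->bi_sector + (bi->bi_size >> 9);

	return sector >= dev_sector + STRIPE_SECTORS;
}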
conf->previous_raid_disks : conf->raid_disks; 1998 1999 raid5_compute_sector(conf, 2000 stripe * (disks - conf->max_degraded) 2001 *sectors_per_chunk + chunk_offset, 2002 previous, 2003 &dd_idx, sh); 2004} 2005 2006static void 2007handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 2008 struct stripe_head_state *s, int disks, 2009 struct bio **return_bi) 2010{ 2011 int i; 2012 for (i = disks; i--; ) { 2013 struct bio *bi; 2014 int bitmap_end = 0; 2015 2016 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2017 mdk_rdev_t *rdev; 2018 rcu_read_lock(); 2019 rdev = rcu_dereference(conf->disks[i].rdev); 2020 if (rdev && test_bit(In_sync, &rdev->flags)) 2021 /* multiple read failures in one stripe */ 2022 md_error(conf->mddev, rdev); 2023 rcu_read_unlock(); 2024 } 2025 spin_lock_irq(&conf->device_lock); 2026 /* fail all writes first */ 2027 bi = sh->dev[i].towrite; 2028 sh->dev[i].towrite = NULL; 2029 if (bi) { 2030 s->to_write--; 2031 bitmap_end = 1; 2032 } 2033 2034 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2035 wake_up(&conf->wait_for_overlap); 2036 2037 while (bi && bi->bi_sector < 2038 sh->dev[i].sector + STRIPE_SECTORS) { 2039 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2040 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2041 if (!raid5_dec_bi_phys_segments(bi)) { 2042 md_write_end(conf->mddev); 2043 bi->bi_next = *return_bi; 2044 *return_bi = bi; 2045 } 2046 bi = nextbi; 2047 } 2048 /* and fail all 'written' */ 2049 bi = sh->dev[i].written; 2050 sh->dev[i].written = NULL; 2051 if (bi) bitmap_end = 1; 2052 while (bi && bi->bi_sector < 2053 sh->dev[i].sector + STRIPE_SECTORS) { 2054 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2055 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2056 if (!raid5_dec_bi_phys_segments(bi)) { 2057 md_write_end(conf->mddev); 2058 bi->bi_next = *return_bi; 2059 *return_bi = bi; 2060 } 2061 bi = bi2; 2062 } 2063 2064 /* fail any reads if this device is non-operational and 2065 * the data has not reached the cache yet. 2066 */ 2067 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2068 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2069 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2070 bi = sh->dev[i].toread; 2071 sh->dev[i].toread = NULL; 2072 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2073 wake_up(&conf->wait_for_overlap); 2074 if (bi) s->to_read--; 2075 while (bi && bi->bi_sector < 2076 sh->dev[i].sector + STRIPE_SECTORS) { 2077 struct bio *nextbi = 2078 r5_next_bio(bi, sh->dev[i].sector); 2079 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2080 if (!raid5_dec_bi_phys_segments(bi)) { 2081 bi->bi_next = *return_bi; 2082 *return_bi = bi; 2083 } 2084 bi = nextbi; 2085 } 2086 } 2087 spin_unlock_irq(&conf->device_lock); 2088 if (bitmap_end) 2089 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2090 STRIPE_SECTORS, 0, 0); 2091 } 2092 2093 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2094 if (atomic_dec_and_test(&conf->pending_full_writes)) 2095 md_wakeup_thread(conf->mddev->thread); 2096} 2097 2098/* fetch_block5 - checks the given member device to see if its data needs 2099 * to be read or computed to satisfy a request. 
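 *
 * Condensed to its essentials, the test below asks (illustrative
 * shorthand only; 'needed_for_recovery' stands in for the failed-device
 * clauses):
 *
 *	int want = !test_bit(R5_LOCKED, &dev->flags) &&
 *		   !test_bit(R5_UPTODATE, &dev->flags) &&
 *		   (dev->toread ||
 *		    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
 *		    s->syncing || s->expanding || needed_for_recovery);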
2100 * 2101 * Returns 1 when no more member devices need to be checked, otherwise returns 2102 * 0 to tell the loop in handle_stripe_fill5 to continue 2103 */ 2104static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 2105 int disk_idx, int disks) 2106{ 2107 struct r5dev *dev = &sh->dev[disk_idx]; 2108 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 2109 2110 /* is the data in this block needed, and can we get it? */ 2111 if (!test_bit(R5_LOCKED, &dev->flags) && 2112 !test_bit(R5_UPTODATE, &dev->flags) && 2113 (dev->toread || 2114 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2115 s->syncing || s->expanding || 2116 (s->failed && 2117 (failed_dev->toread || 2118 (failed_dev->towrite && 2119 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 2120 /* We would like to get this block, possibly by computing it, 2121 * otherwise read it if the backing disk is insync 2122 */ 2123 if ((s->uptodate == disks - 1) && 2124 (s->failed && disk_idx == s->failed_num)) { 2125 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2126 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2127 set_bit(R5_Wantcompute, &dev->flags); 2128 sh->ops.target = disk_idx; 2129 s->req_compute = 1; 2130 /* Careful: from this point on 'uptodate' is in the eye 2131 * of raid5_run_ops which services 'compute' operations 2132 * before writes. R5_Wantcompute flags a block that will 2133 * be R5_UPTODATE by the time it is needed for a 2134 * subsequent operation. 2135 */ 2136 s->uptodate++; 2137 return 1; /* uptodate + compute == disks */ 2138 } else if (test_bit(R5_Insync, &dev->flags)) { 2139 set_bit(R5_LOCKED, &dev->flags); 2140 set_bit(R5_Wantread, &dev->flags); 2141 s->locked++; 2142 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2143 s->syncing); 2144 } 2145 } 2146 2147 return 0; 2148} 2149 2150/** 2151 * handle_stripe_fill5 - read or compute data to satisfy pending requests. 
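 *
 * The 'compute' that fetch_block5() may schedule instead of a read is a
 * plain XOR reconstruction over the surviving blocks. A self-contained
 * userspace sketch of the idea (illustrative only; the driver really
 * drives the async_tx xor engine instead):
 *
 *	static void xor_reconstruct(unsigned char *dst,
 *				    unsigned char **src, int nsrc, size_t len)
 *	{
 *		size_t j;
 *		int k;
 *
 *		memset(dst, 0, len);
 *		for (k = 0; k < nsrc; k++)
 *			for (j = 0; j < len; j++)
 *				dst[j] ^= src[k][j];
 *	}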
2152 */ 2153static void handle_stripe_fill5(struct stripe_head *sh, 2154 struct stripe_head_state *s, int disks) 2155{ 2156 int i; 2157 2158 /* look for blocks to read/compute, skip this if a compute 2159 * is already in flight, or if the stripe contents are in the 2160 * midst of changing due to a write 2161 */ 2162 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2163 !sh->reconstruct_state) 2164 for (i = disks; i--; ) 2165 if (fetch_block5(sh, s, i, disks)) 2166 break; 2167 set_bit(STRIPE_HANDLE, &sh->state); 2168} 2169 2170static void handle_stripe_fill6(struct stripe_head *sh, 2171 struct stripe_head_state *s, struct r6_state *r6s, 2172 int disks) 2173{ 2174 int i; 2175 for (i = disks; i--; ) { 2176 struct r5dev *dev = &sh->dev[i]; 2177 if (!test_bit(R5_LOCKED, &dev->flags) && 2178 !test_bit(R5_UPTODATE, &dev->flags) && 2179 (dev->toread || (dev->towrite && 2180 !test_bit(R5_OVERWRITE, &dev->flags)) || 2181 s->syncing || s->expanding || 2182 (s->failed >= 1 && 2183 (sh->dev[r6s->failed_num[0]].toread || 2184 s->to_write)) || 2185 (s->failed >= 2 && 2186 (sh->dev[r6s->failed_num[1]].toread || 2187 s->to_write)))) { 2188 /* we would like to get this block, possibly 2189 * by computing it, but we might not be able to 2190 */ 2191 if ((s->uptodate == disks - 1) && 2192 (s->failed && (i == r6s->failed_num[0] || 2193 i == r6s->failed_num[1]))) { 2194 pr_debug("Computing stripe %llu block %d\n", 2195 (unsigned long long)sh->sector, i); 2196 compute_block_1(sh, i, 0); 2197 s->uptodate++; 2198 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2199 /* Computing 2-failure is *very* expensive; only 2200 * do it if failed >= 2 2201 */ 2202 int other; 2203 for (other = disks; other--; ) { 2204 if (other == i) 2205 continue; 2206 if (!test_bit(R5_UPTODATE, 2207 &sh->dev[other].flags)) 2208 break; 2209 } 2210 BUG_ON(other < 0); 2211 pr_debug("Computing stripe %llu blocks %d,%d\n", 2212 (unsigned long long)sh->sector, 2213 i, other); 2214 compute_block_2(sh, i, other); 2215 s->uptodate += 2; 2216 } else if (test_bit(R5_Insync, &dev->flags)) { 2217 set_bit(R5_LOCKED, &dev->flags); 2218 set_bit(R5_Wantread, &dev->flags); 2219 s->locked++; 2220 pr_debug("Reading block %d (sync=%d)\n", 2221 i, s->syncing); 2222 } 2223 } 2224 } 2225 set_bit(STRIPE_HANDLE, &sh->state); 2226} 2227 2228 2229/* handle_stripe_clean_event 2230 * any written block on an uptodate or failed drive can be returned. 2231 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2232 * never LOCKED, so we don't need to test 'failed' directly. 
2233 */ 2234static void handle_stripe_clean_event(raid5_conf_t *conf, 2235 struct stripe_head *sh, int disks, struct bio **return_bi) 2236{ 2237 int i; 2238 struct r5dev *dev; 2239 2240 for (i = disks; i--; ) 2241 if (sh->dev[i].written) { 2242 dev = &sh->dev[i]; 2243 if (!test_bit(R5_LOCKED, &dev->flags) && 2244 test_bit(R5_UPTODATE, &dev->flags)) { 2245 /* We can return any write requests */ 2246 struct bio *wbi, *wbi2; 2247 int bitmap_end = 0; 2248 pr_debug("Return write for disc %d\n", i); 2249 spin_lock_irq(&conf->device_lock); 2250 wbi = dev->written; 2251 dev->written = NULL; 2252 while (wbi && wbi->bi_sector < 2253 dev->sector + STRIPE_SECTORS) { 2254 wbi2 = r5_next_bio(wbi, dev->sector); 2255 if (!raid5_dec_bi_phys_segments(wbi)) { 2256 md_write_end(conf->mddev); 2257 wbi->bi_next = *return_bi; 2258 *return_bi = wbi; 2259 } 2260 wbi = wbi2; 2261 } 2262 if (dev->towrite == NULL) 2263 bitmap_end = 1; 2264 spin_unlock_irq(&conf->device_lock); 2265 if (bitmap_end) 2266 bitmap_endwrite(conf->mddev->bitmap, 2267 sh->sector, 2268 STRIPE_SECTORS, 2269 !test_bit(STRIPE_DEGRADED, &sh->state), 2270 0); 2271 } 2272 } 2273 2274 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2275 if (atomic_dec_and_test(&conf->pending_full_writes)) 2276 md_wakeup_thread(conf->mddev->thread); 2277} 2278 2279static void handle_stripe_dirtying5(raid5_conf_t *conf, 2280 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2281{ 2282 int rmw = 0, rcw = 0, i; 2283 for (i = disks; i--; ) { 2284 /* would I have to read this buffer for read_modify_write */ 2285 struct r5dev *dev = &sh->dev[i]; 2286 if ((dev->towrite || i == sh->pd_idx) && 2287 !test_bit(R5_LOCKED, &dev->flags) && 2288 !(test_bit(R5_UPTODATE, &dev->flags) || 2289 test_bit(R5_Wantcompute, &dev->flags))) { 2290 if (test_bit(R5_Insync, &dev->flags)) 2291 rmw++; 2292 else 2293 rmw += 2*disks; /* cannot read it */ 2294 } 2295 /* Would I have to read this buffer for reconstruct_write */ 2296 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2297 !test_bit(R5_LOCKED, &dev->flags) && 2298 !(test_bit(R5_UPTODATE, &dev->flags) || 2299 test_bit(R5_Wantcompute, &dev->flags))) { 2300 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2301 else 2302 rcw += 2*disks; 2303 } 2304 } 2305 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2306 (unsigned long long)sh->sector, rmw, rcw); 2307 set_bit(STRIPE_HANDLE, &sh->state); 2308 if (rmw < rcw && rmw > 0) 2309 /* prefer read-modify-write, but need to get some data */ 2310 for (i = disks; i--; ) { 2311 struct r5dev *dev = &sh->dev[i]; 2312 if ((dev->towrite || i == sh->pd_idx) && 2313 !test_bit(R5_LOCKED, &dev->flags) && 2314 !(test_bit(R5_UPTODATE, &dev->flags) || 2315 test_bit(R5_Wantcompute, &dev->flags)) && 2316 test_bit(R5_Insync, &dev->flags)) { 2317 if ( 2318 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2319 pr_debug("Read_old block " 2320 "%d for r-m-w\n", i); 2321 set_bit(R5_LOCKED, &dev->flags); 2322 set_bit(R5_Wantread, &dev->flags); 2323 s->locked++; 2324 } else { 2325 set_bit(STRIPE_DELAYED, &sh->state); 2326 set_bit(STRIPE_HANDLE, &sh->state); 2327 } 2328 } 2329 } 2330 if (rcw <= rmw && rcw > 0) 2331 /* want reconstruct write, but need to get some data */ 2332 for (i = disks; i--; ) { 2333 struct r5dev *dev = &sh->dev[i]; 2334 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2335 i != sh->pd_idx && 2336 !test_bit(R5_LOCKED, &dev->flags) && 2337 !(test_bit(R5_UPTODATE, &dev->flags) || 2338 test_bit(R5_Wantcompute, &dev->flags)) && 2339 test_bit(R5_Insync, &dev->flags)) { 2340 if ( 2341 
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2342 pr_debug("Read_old block " 2343 "%d for Reconstruct\n", i); 2344 set_bit(R5_LOCKED, &dev->flags); 2345 set_bit(R5_Wantread, &dev->flags); 2346 s->locked++; 2347 } else { 2348 set_bit(STRIPE_DELAYED, &sh->state); 2349 set_bit(STRIPE_HANDLE, &sh->state); 2350 } 2351 } 2352 } 2353 /* now if nothing is locked, and if we have enough data, 2354 * we can start a write request 2355 */ 2356 /* since handle_stripe can be called at any time we need to handle the 2357 * case where a compute block operation has been submitted and then a 2358 * subsequent call wants to start a write request. raid5_run_ops only 2359 * handles the case where compute block and postxor are requested 2360 * simultaneously. If this is not the case then new writes need to be 2361 * held off until the compute completes. 2362 */ 2363 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2364 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2365 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2366 schedule_reconstruction5(sh, s, rcw == 0, 0); 2367} 2368 2369static void handle_stripe_dirtying6(raid5_conf_t *conf, 2370 struct stripe_head *sh, struct stripe_head_state *s, 2371 struct r6_state *r6s, int disks) 2372{ 2373 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2374 int qd_idx = sh->qd_idx; 2375 for (i = disks; i--; ) { 2376 struct r5dev *dev = &sh->dev[i]; 2377 /* Would I have to read this buffer for reconstruct_write */ 2378 if (!test_bit(R5_OVERWRITE, &dev->flags) 2379 && i != pd_idx && i != qd_idx 2380 && (!test_bit(R5_LOCKED, &dev->flags) 2381 ) && 2382 !test_bit(R5_UPTODATE, &dev->flags)) { 2383 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2384 else { 2385 pr_debug("raid6: must_compute: " 2386 "disk %d flags=%#lx\n", i, dev->flags); 2387 must_compute++; 2388 } 2389 } 2390 } 2391 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2392 (unsigned long long)sh->sector, rcw, must_compute); 2393 set_bit(STRIPE_HANDLE, &sh->state); 2394 2395 if (rcw > 0) 2396 /* want reconstruct write, but need to get some data */ 2397 for (i = disks; i--; ) { 2398 struct r5dev *dev = &sh->dev[i]; 2399 if (!test_bit(R5_OVERWRITE, &dev->flags) 2400 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2401 && !test_bit(R5_LOCKED, &dev->flags) && 2402 !test_bit(R5_UPTODATE, &dev->flags) && 2403 test_bit(R5_Insync, &dev->flags)) { 2404 if ( 2405 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2406 pr_debug("Read_old stripe %llu " 2407 "block %d for Reconstruct\n", 2408 (unsigned long long)sh->sector, i); 2409 set_bit(R5_LOCKED, &dev->flags); 2410 set_bit(R5_Wantread, &dev->flags); 2411 s->locked++; 2412 } else { 2413 pr_debug("Request delayed stripe %llu " 2414 "block %d for Reconstruct\n", 2415 (unsigned long long)sh->sector, i); 2416 set_bit(STRIPE_DELAYED, &sh->state); 2417 set_bit(STRIPE_HANDLE, &sh->state); 2418 } 2419 } 2420 } 2421 /* now if nothing is locked, and if we have enough data, we can start a 2422 * write request 2423 */ 2424 if (s->locked == 0 && rcw == 0 && 2425 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2426 if (must_compute > 0) { 2427 /* We have failed blocks and need to compute them */ 2428 switch (s->failed) { 2429 case 0: 2430 BUG(); 2431 case 1: 2432 compute_block_1(sh, r6s->failed_num[0], 0); 2433 break; 2434 case 2: 2435 compute_block_2(sh, r6s->failed_num[0], 2436 r6s->failed_num[1]); 2437 break; 2438 default: /* This request should have been failed? 
*/ 2439 BUG(); 2440 } 2441 } 2442 2443 pr_debug("Computing parity for stripe %llu\n", 2444 (unsigned long long)sh->sector); 2445 compute_parity6(sh, RECONSTRUCT_WRITE); 2446 /* now every locked buffer is ready to be written */ 2447 for (i = disks; i--; ) 2448 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2449 pr_debug("Writing stripe %llu block %d\n", 2450 (unsigned long long)sh->sector, i); 2451 s->locked++; 2452 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2453 } 2454 if (s->locked == disks) 2455 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2456 atomic_inc(&conf->pending_full_writes); 2457 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2458 set_bit(STRIPE_INSYNC, &sh->state); 2459 2460 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2461 atomic_dec(&conf->preread_active_stripes); 2462 if (atomic_read(&conf->preread_active_stripes) < 2463 IO_THRESHOLD) 2464 md_wakeup_thread(conf->mddev->thread); 2465 } 2466 } 2467} 2468 2469static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2470 struct stripe_head_state *s, int disks) 2471{ 2472 struct r5dev *dev = NULL; 2473 2474 set_bit(STRIPE_HANDLE, &sh->state); 2475 2476 switch (sh->check_state) { 2477 case check_state_idle: 2478 /* start a new check operation if there are no failures */ 2479 if (s->failed == 0) { 2480 BUG_ON(s->uptodate != disks); 2481 sh->check_state = check_state_run; 2482 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2483 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2484 s->uptodate--; 2485 break; 2486 } 2487 dev = &sh->dev[s->failed_num]; 2488 /* fall through */ 2489 case check_state_compute_result: 2490 sh->check_state = check_state_idle; 2491 if (!dev) 2492 dev = &sh->dev[sh->pd_idx]; 2493 2494 /* check that a write has not made the stripe insync */ 2495 if (test_bit(STRIPE_INSYNC, &sh->state)) 2496 break; 2497 2498 /* either failed parity check, or recovery is happening */ 2499 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2500 BUG_ON(s->uptodate != disks); 2501 2502 set_bit(R5_LOCKED, &dev->flags); 2503 s->locked++; 2504 set_bit(R5_Wantwrite, &dev->flags); 2505 2506 clear_bit(STRIPE_DEGRADED, &sh->state); 2507 set_bit(STRIPE_INSYNC, &sh->state); 2508 break; 2509 case check_state_run: 2510 break; /* we will be called again upon completion */ 2511 case check_state_check_result: 2512 sh->check_state = check_state_idle; 2513 2514 /* if a failure occurred during the check operation, leave 2515 * STRIPE_INSYNC not set and let the stripe be handled again 2516 */ 2517 if (s->failed) 2518 break; 2519 2520 /* handle a successful check operation, if parity is correct 2521 * we are done. Otherwise update the mismatch count and repair 2522 * parity if !MD_RECOVERY_CHECK 2523 */ 2524 if (sh->ops.zero_sum_result == 0) 2525 /* parity is correct (on disc, 2526 * not in buffer any more) 2527 */ 2528 set_bit(STRIPE_INSYNC, &sh->state); 2529 else { 2530 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2531 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2532 /* don't try to repair!! 
*/ 2533 set_bit(STRIPE_INSYNC, &sh->state); 2534 else { 2535 sh->check_state = check_state_compute_run; 2536 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2537 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2538 set_bit(R5_Wantcompute, 2539 &sh->dev[sh->pd_idx].flags); 2540 sh->ops.target = sh->pd_idx; 2541 s->uptodate++; 2542 } 2543 } 2544 break; 2545 case check_state_compute_run: 2546 break; 2547 default: 2548 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2549 __func__, sh->check_state, 2550 (unsigned long long) sh->sector); 2551 BUG(); 2552 } 2553} 2554 2555 2556static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2557 struct stripe_head_state *s, 2558 struct r6_state *r6s, struct page *tmp_page, 2559 int disks) 2560{ 2561 int update_p = 0, update_q = 0; 2562 struct r5dev *dev; 2563 int pd_idx = sh->pd_idx; 2564 int qd_idx = sh->qd_idx; 2565 2566 set_bit(STRIPE_HANDLE, &sh->state); 2567 2568 BUG_ON(s->failed > 2); 2569 BUG_ON(s->uptodate < disks); 2570 /* Want to check and possibly repair P and Q. 2571 * However there could be one 'failed' device, in which 2572 * case we can only check one of them, possibly using the 2573 * other to generate missing data 2574 */ 2575 2576 /* If !tmp_page, we cannot do the calculations, 2577 * but as we have set STRIPE_HANDLE, we will soon be called 2578 * by stripe_handle with a tmp_page - just wait until then. 2579 */ 2580 if (tmp_page) { 2581 if (s->failed == r6s->q_failed) { 2582 /* The only possible failed device holds 'Q', so it 2583 * makes sense to check P (If anything else were failed, 2584 * we would have used P to recreate it). 2585 */ 2586 compute_block_1(sh, pd_idx, 1); 2587 if (!page_is_zero(sh->dev[pd_idx].page)) { 2588 compute_block_1(sh, pd_idx, 0); 2589 update_p = 1; 2590 } 2591 } 2592 if (!r6s->q_failed && s->failed < 2) { 2593 /* q is not failed, and we didn't use it to generate 2594 * anything, so it makes sense to check it 2595 */ 2596 memcpy(page_address(tmp_page), 2597 page_address(sh->dev[qd_idx].page), 2598 STRIPE_SIZE); 2599 compute_parity6(sh, UPDATE_PARITY); 2600 if (memcmp(page_address(tmp_page), 2601 page_address(sh->dev[qd_idx].page), 2602 STRIPE_SIZE) != 0) { 2603 clear_bit(STRIPE_INSYNC, &sh->state); 2604 update_q = 1; 2605 } 2606 } 2607 if (update_p || update_q) { 2608 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2609 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2610 /* don't try to repair!! 
*/ 2611 update_p = update_q = 0; 2612 } 2613 2614 /* now write out any block on a failed drive, 2615 * or P or Q if they need it 2616 */ 2617 2618 if (s->failed == 2) { 2619 dev = &sh->dev[r6s->failed_num[1]]; 2620 s->locked++; 2621 set_bit(R5_LOCKED, &dev->flags); 2622 set_bit(R5_Wantwrite, &dev->flags); 2623 } 2624 if (s->failed >= 1) { 2625 dev = &sh->dev[r6s->failed_num[0]]; 2626 s->locked++; 2627 set_bit(R5_LOCKED, &dev->flags); 2628 set_bit(R5_Wantwrite, &dev->flags); 2629 } 2630 2631 if (update_p) { 2632 dev = &sh->dev[pd_idx]; 2633 s->locked++; 2634 set_bit(R5_LOCKED, &dev->flags); 2635 set_bit(R5_Wantwrite, &dev->flags); 2636 } 2637 if (update_q) { 2638 dev = &sh->dev[qd_idx]; 2639 s->locked++; 2640 set_bit(R5_LOCKED, &dev->flags); 2641 set_bit(R5_Wantwrite, &dev->flags); 2642 } 2643 clear_bit(STRIPE_DEGRADED, &sh->state); 2644 2645 set_bit(STRIPE_INSYNC, &sh->state); 2646 } 2647} 2648 2649static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2650 struct r6_state *r6s) 2651{ 2652 int i; 2653 2654 /* We have read all the blocks in this stripe and now we need to 2655 * copy some of them into a target stripe for expand. 2656 */ 2657 struct dma_async_tx_descriptor *tx = NULL; 2658 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2659 for (i = 0; i < sh->disks; i++) 2660 if (i != sh->pd_idx && i != sh->qd_idx) { 2661 int dd_idx, j; 2662 struct stripe_head *sh2; 2663 2664 sector_t bn = compute_blocknr(sh, i); 2665 sector_t s = raid5_compute_sector(conf, bn, 0, 2666 &dd_idx, NULL); 2667 sh2 = get_active_stripe(conf, s, 0, 1); 2668 if (sh2 == NULL) 2669 /* so far only the early blocks of this stripe 2670 * have been requested. When later blocks 2671 * get requested, we will try again 2672 */ 2673 continue; 2674 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2675 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2676 /* must have already done this block */ 2677 release_stripe(sh2); 2678 continue; 2679 } 2680 2681 /* place all the copies on one channel */ 2682 tx = async_memcpy(sh2->dev[dd_idx].page, 2683 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2684 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2685 2686 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2687 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2688 for (j = 0; j < conf->raid_disks; j++) 2689 if (j != sh2->pd_idx && 2690 (!r6s || j != sh2->qd_idx) && 2691 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2692 break; 2693 if (j == conf->raid_disks) { 2694 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2695 set_bit(STRIPE_HANDLE, &sh2->state); 2696 } 2697 release_stripe(sh2); 2698 2699 } 2700 /* done submitting copies, wait for them to complete */ 2701 if (tx) { 2702 async_tx_ack(tx); 2703 dma_wait_for_async_tx(tx); 2704 } 2705} 2706 2707 2708/* 2709 * handle_stripe - do things to a stripe. 2710 * 2711 * We lock the stripe and then examine the state of various bits 2712 * to see what needs to be done. 2713 * Possible results: 2714 * return some read request which now have data 2715 * return some write requests which are safely on disc 2716 * schedule a read on some buffers 2717 * schedule a write of some buffers 2718 * return confirmation of parity correctness 2719 * 2720 * buffers are taken off read_list or write_list, and bh_cache buffers 2721 * get BH_Lock set before the stripe lock is released. 
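 *
 * A rough sketch of how the per-stripe counters gathered into
 * struct stripe_head_state drive the decisions (illustrative summary,
 * not authoritative):
 *
 *	failed > max tolerated  -> fail queued bios (handle_failed_stripe)
 *	written && parity safe  -> complete writes (handle_stripe_clean_event)
 *	to_read / partial write -> read or compute blocks (handle_stripe_fill)
 *	to_write                -> pick rmw vs rcw (handle_stripe_dirtying)
 *	syncing && idle         -> check/repair parity (handle_parity_checks)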
2722 * 2723 */ 2724 2725static bool handle_stripe5(struct stripe_head *sh) 2726{ 2727 raid5_conf_t *conf = sh->raid_conf; 2728 int disks = sh->disks, i; 2729 struct bio *return_bi = NULL; 2730 struct stripe_head_state s; 2731 struct r5dev *dev; 2732 mdk_rdev_t *blocked_rdev = NULL; 2733 int prexor; 2734 2735 memset(&s, 0, sizeof(s)); 2736 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2737 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2738 atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2739 sh->reconstruct_state); 2740 2741 spin_lock(&sh->lock); 2742 clear_bit(STRIPE_HANDLE, &sh->state); 2743 clear_bit(STRIPE_DELAYED, &sh->state); 2744 2745 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2746 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2747 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2748 2749 /* Now to look around and see what can be done */ 2750 rcu_read_lock(); 2751 for (i=disks; i--; ) { 2752 mdk_rdev_t *rdev; 2753 struct r5dev *dev = &sh->dev[i]; 2754 clear_bit(R5_Insync, &dev->flags); 2755 2756 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2757 "written %p\n", i, dev->flags, dev->toread, dev->read, 2758 dev->towrite, dev->written); 2759 2760 /* maybe we can request a biofill operation 2761 * 2762 * new wantfill requests are only permitted while 2763 * ops_complete_biofill is guaranteed to be inactive 2764 */ 2765 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2766 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2767 set_bit(R5_Wantfill, &dev->flags); 2768 2769 /* now count some things */ 2770 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2771 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2772 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2773 2774 if (test_bit(R5_Wantfill, &dev->flags)) 2775 s.to_fill++; 2776 else if (dev->toread) 2777 s.to_read++; 2778 if (dev->towrite) { 2779 s.to_write++; 2780 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2781 s.non_overwrite++; 2782 } 2783 if (dev->written) 2784 s.written++; 2785 rdev = rcu_dereference(conf->disks[i].rdev); 2786 if (blocked_rdev == NULL && 2787 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2788 blocked_rdev = rdev; 2789 atomic_inc(&rdev->nr_pending); 2790 } 2791 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2792 /* The ReadError flag will just be confusing now */ 2793 clear_bit(R5_ReadError, &dev->flags); 2794 clear_bit(R5_ReWrite, &dev->flags); 2795 } 2796 if (!rdev || !test_bit(In_sync, &rdev->flags) 2797 || test_bit(R5_ReadError, &dev->flags)) { 2798 s.failed++; 2799 s.failed_num = i; 2800 } else 2801 set_bit(R5_Insync, &dev->flags); 2802 } 2803 rcu_read_unlock(); 2804 2805 if (unlikely(blocked_rdev)) { 2806 if (s.syncing || s.expanding || s.expanded || 2807 s.to_write || s.written) { 2808 set_bit(STRIPE_HANDLE, &sh->state); 2809 goto unlock; 2810 } 2811 /* There is nothing for the blocked_rdev to block */ 2812 rdev_dec_pending(blocked_rdev, conf->mddev); 2813 blocked_rdev = NULL; 2814 } 2815 2816 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2817 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 2818 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 2819 } 2820 2821 pr_debug("locked=%d uptodate=%d to_read=%d" 2822 " to_write=%d failed=%d failed_num=%d\n", 2823 s.locked, s.uptodate, s.to_read, s.to_write, 2824 s.failed, s.failed_num); 2825 /* check if the array has lost two devices and, if so, some requests might 2826 * need to be failed 2827 */ 2828 if (s.failed > 1 && s.to_read+s.to_write+s.written) 
2829 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2830 if (s.failed > 1 && s.syncing) { 2831 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2832 clear_bit(STRIPE_SYNCING, &sh->state); 2833 s.syncing = 0; 2834 } 2835 2836 /* might be able to return some write requests if the parity block 2837 * is safe, or on a failed drive 2838 */ 2839 dev = &sh->dev[sh->pd_idx]; 2840 if ( s.written && 2841 ((test_bit(R5_Insync, &dev->flags) && 2842 !test_bit(R5_LOCKED, &dev->flags) && 2843 test_bit(R5_UPTODATE, &dev->flags)) || 2844 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2845 handle_stripe_clean_event(conf, sh, disks, &return_bi); 2846 2847 /* Now we might consider reading some blocks, either to check/generate 2848 * parity, or to satisfy requests 2849 * or to load a block that is being partially written. 2850 */ 2851 if (s.to_read || s.non_overwrite || 2852 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 2853 handle_stripe_fill5(sh, &s, disks); 2854 2855 /* Now we check to see if any write operations have recently 2856 * completed 2857 */ 2858 prexor = 0; 2859 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 2860 prexor = 1; 2861 if (sh->reconstruct_state == reconstruct_state_drain_result || 2862 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 2863 sh->reconstruct_state = reconstruct_state_idle; 2864 2865 /* All the 'written' buffers and the parity block are ready to 2866 * be written back to disk 2867 */ 2868 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2869 for (i = disks; i--; ) { 2870 dev = &sh->dev[i]; 2871 if (test_bit(R5_LOCKED, &dev->flags) && 2872 (i == sh->pd_idx || dev->written)) { 2873 pr_debug("Writing block %d\n", i); 2874 set_bit(R5_Wantwrite, &dev->flags); 2875 if (prexor) 2876 continue; 2877 if (!test_bit(R5_Insync, &dev->flags) || 2878 (i == sh->pd_idx && s.failed == 0)) 2879 set_bit(STRIPE_INSYNC, &sh->state); 2880 } 2881 } 2882 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2883 atomic_dec(&conf->preread_active_stripes); 2884 if (atomic_read(&conf->preread_active_stripes) < 2885 IO_THRESHOLD) 2886 md_wakeup_thread(conf->mddev->thread); 2887 } 2888 } 2889 2890 /* Now to consider new write requests and what else, if anything 2891 * should be read. We do not handle new writes when: 2892 * 1/ A 'write' operation (copy+xor) is already in flight. 2893 * 2/ A 'check' operation is in flight, as it may clobber the parity 2894 * block. 2895 */ 2896 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 2897 handle_stripe_dirtying5(conf, sh, &s, disks); 2898 2899 /* maybe we need to check and possibly fix the parity for this stripe 2900 * Any reads will already have been scheduled, so we just see if enough 2901 * data is available. The parity check is held off while parity 2902 * dependent operations are in flight. 
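 *
 * handle_parity_checks5() below is a small state machine; an
 * illustrative summary of its transitions (informal, derived from the
 * code):
 *
 *	idle          -> run            issue STRIPE_OP_CHECK (xor sum)
 *	run           -> check_result   on completion of the check op
 *	check_result  -> idle           parity ok, STRIPE_INSYNC set
 *	check_result  -> compute_run    mismatch: recompute parity block
 *	compute_run   -> compute_result -> idle, write repaired parity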
2903 */ 2904 if (sh->check_state || 2905 (s.syncing && s.locked == 0 && 2906 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 2907 !test_bit(STRIPE_INSYNC, &sh->state))) 2908 handle_parity_checks5(conf, sh, &s, disks); 2909 2910 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2911 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2912 clear_bit(STRIPE_SYNCING, &sh->state); 2913 } 2914 2915 /* If the failed drive is just a ReadError, then we might need to progress 2916 * the repair/check process 2917 */ 2918 if (s.failed == 1 && !conf->mddev->ro && 2919 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2920 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2921 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2922 ) { 2923 dev = &sh->dev[s.failed_num]; 2924 if (!test_bit(R5_ReWrite, &dev->flags)) { 2925 set_bit(R5_Wantwrite, &dev->flags); 2926 set_bit(R5_ReWrite, &dev->flags); 2927 set_bit(R5_LOCKED, &dev->flags); 2928 s.locked++; 2929 } else { 2930 /* let's read it back */ 2931 set_bit(R5_Wantread, &dev->flags); 2932 set_bit(R5_LOCKED, &dev->flags); 2933 s.locked++; 2934 } 2935 } 2936 2937 /* Finish reconstruct operations initiated by the expansion process */ 2938 if (sh->reconstruct_state == reconstruct_state_result) { 2939 sh->reconstruct_state = reconstruct_state_idle; 2940 clear_bit(STRIPE_EXPANDING, &sh->state); 2941 for (i = conf->raid_disks; i--; ) { 2942 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2943 set_bit(R5_LOCKED, &sh->dev[i].flags); 2944 s.locked++; 2945 } 2946 } 2947 2948 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2949 !sh->reconstruct_state) { 2950 /* Need to write out all blocks after computing parity */ 2951 sh->disks = conf->raid_disks; 2952 stripe_set_idx(sh->sector, conf, 0, sh); 2953 schedule_reconstruction5(sh, &s, 1, 1); 2954 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2955 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2956 atomic_dec(&conf->reshape_stripes); 2957 wake_up(&conf->wait_for_overlap); 2958 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2959 } 2960 2961 if (s.expanding && s.locked == 0 && 2962 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2963 handle_stripe_expansion(conf, sh, NULL); 2964 2965 unlock: 2966 spin_unlock(&sh->lock); 2967 2968 /* wait for this device to become unblocked */ 2969 if (unlikely(blocked_rdev)) 2970 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2971 2972 if (s.ops_request) 2973 raid5_run_ops(sh, s.ops_request); 2974 2975 ops_run_io(sh, &s); 2976 2977 return_io(return_bi); 2978 2979 return blocked_rdev == NULL; 2980} 2981 2982static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2983{ 2984 raid5_conf_t *conf = sh->raid_conf; 2985 int disks = sh->disks; 2986 struct bio *return_bi = NULL; 2987 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 2988 struct stripe_head_state s; 2989 struct r6_state r6s; 2990 struct r5dev *dev, *pdev, *qdev; 2991 mdk_rdev_t *blocked_rdev = NULL; 2992 2993 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2994 "pd_idx=%d, qd_idx=%d\n", 2995 (unsigned long long)sh->sector, sh->state, 2996 atomic_read(&sh->count), pd_idx, qd_idx); 2997 memset(&s, 0, sizeof(s)); 2998 2999 spin_lock(&sh->lock); 3000 clear_bit(STRIPE_HANDLE, &sh->state); 3001 clear_bit(STRIPE_DELAYED, &sh->state); 3002 3003 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 3004 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3005 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3006 /* Now to look around and see what can be done */ 
3007 3008 rcu_read_lock(); 3009 for (i=disks; i--; ) { 3010 mdk_rdev_t *rdev; 3011 dev = &sh->dev[i]; 3012 clear_bit(R5_Insync, &dev->flags); 3013 3014 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3015 i, dev->flags, dev->toread, dev->towrite, dev->written); 3016 /* maybe we can reply to a read */ 3017 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 3018 struct bio *rbi, *rbi2; 3019 pr_debug("Return read for disc %d\n", i); 3020 spin_lock_irq(&conf->device_lock); 3021 rbi = dev->toread; 3022 dev->toread = NULL; 3023 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 3024 wake_up(&conf->wait_for_overlap); 3025 spin_unlock_irq(&conf->device_lock); 3026 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 3027 copy_data(0, rbi, dev->page, dev->sector); 3028 rbi2 = r5_next_bio(rbi, dev->sector); 3029 spin_lock_irq(&conf->device_lock); 3030 if (!raid5_dec_bi_phys_segments(rbi)) { 3031 rbi->bi_next = return_bi; 3032 return_bi = rbi; 3033 } 3034 spin_unlock_irq(&conf->device_lock); 3035 rbi = rbi2; 3036 } 3037 } 3038 3039 /* now count some things */ 3040 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3041 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3042 3043 3044 if (dev->toread) 3045 s.to_read++; 3046 if (dev->towrite) { 3047 s.to_write++; 3048 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3049 s.non_overwrite++; 3050 } 3051 if (dev->written) 3052 s.written++; 3053 rdev = rcu_dereference(conf->disks[i].rdev); 3054 if (blocked_rdev == NULL && 3055 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3056 blocked_rdev = rdev; 3057 atomic_inc(&rdev->nr_pending); 3058 } 3059 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3060 /* The ReadError flag will just be confusing now */ 3061 clear_bit(R5_ReadError, &dev->flags); 3062 clear_bit(R5_ReWrite, &dev->flags); 3063 } 3064 if (!rdev || !test_bit(In_sync, &rdev->flags) 3065 || test_bit(R5_ReadError, &dev->flags)) { 3066 if (s.failed < 2) 3067 r6s.failed_num[s.failed] = i; 3068 s.failed++; 3069 } else 3070 set_bit(R5_Insync, &dev->flags); 3071 } 3072 rcu_read_unlock(); 3073 3074 if (unlikely(blocked_rdev)) { 3075 if (s.syncing || s.expanding || s.expanded || 3076 s.to_write || s.written) { 3077 set_bit(STRIPE_HANDLE, &sh->state); 3078 goto unlock; 3079 } 3080 /* There is nothing for the blocked_rdev to block */ 3081 rdev_dec_pending(blocked_rdev, conf->mddev); 3082 blocked_rdev = NULL; 3083 } 3084 3085 pr_debug("locked=%d uptodate=%d to_read=%d" 3086 " to_write=%d failed=%d failed_num=%d,%d\n", 3087 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3088 r6s.failed_num[0], r6s.failed_num[1]); 3089 /* check if the array has lost >2 devices and, if so, some requests 3090 * might need to be failed 3091 */ 3092 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3093 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3094 if (s.failed > 2 && s.syncing) { 3095 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3096 clear_bit(STRIPE_SYNCING, &sh->state); 3097 s.syncing = 0; 3098 } 3099 3100 /* 3101 * might be able to return some write requests if the parity blocks 3102 * are safe, or on a failed drive 3103 */ 3104 pdev = &sh->dev[pd_idx]; 3105 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3106 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3107 qdev = &sh->dev[qd_idx]; 3108 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3109 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3110 3111 if ( s.written && 3112 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3113 && 
!test_bit(R5_LOCKED, &pdev->flags) 3114 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3115 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3116 && !test_bit(R5_LOCKED, &qdev->flags) 3117 && test_bit(R5_UPTODATE, &qdev->flags))))) 3118 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3119 3120 /* Now we might consider reading some blocks, either to check/generate 3121 * parity, or to satisfy requests 3122 * or to load a block that is being partially written. 3123 */ 3124 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3125 (s.syncing && (s.uptodate < disks)) || s.expanding) 3126 handle_stripe_fill6(sh, &s, &r6s, disks); 3127 3128 /* now to consider writing and what else, if anything should be read */ 3129 if (s.to_write) 3130 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3131 3132 /* maybe we need to check and possibly fix the parity for this stripe 3133 * Any reads will already have been scheduled, so we just see if enough 3134 * data is available 3135 */ 3136 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3137 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3138 3139 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3140 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3141 clear_bit(STRIPE_SYNCING, &sh->state); 3142 } 3143 3144 /* If the failed drives are just a ReadError, then we might need 3145 * to progress the repair/check process 3146 */ 3147 if (s.failed <= 2 && !conf->mddev->ro) 3148 for (i = 0; i < s.failed; i++) { 3149 dev = &sh->dev[r6s.failed_num[i]]; 3150 if (test_bit(R5_ReadError, &dev->flags) 3151 && !test_bit(R5_LOCKED, &dev->flags) 3152 && test_bit(R5_UPTODATE, &dev->flags) 3153 ) { 3154 if (!test_bit(R5_ReWrite, &dev->flags)) { 3155 set_bit(R5_Wantwrite, &dev->flags); 3156 set_bit(R5_ReWrite, &dev->flags); 3157 set_bit(R5_LOCKED, &dev->flags); 3158 } else { 3159 /* let's read it back */ 3160 set_bit(R5_Wantread, &dev->flags); 3161 set_bit(R5_LOCKED, &dev->flags); 3162 } 3163 } 3164 } 3165 3166 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3167 /* Need to write out all blocks after computing P&Q */ 3168 sh->disks = conf->raid_disks; 3169 stripe_set_idx(sh->sector, conf, 0, sh); 3170 compute_parity6(sh, RECONSTRUCT_WRITE); 3171 for (i = conf->raid_disks ; i-- ; ) { 3172 set_bit(R5_LOCKED, &sh->dev[i].flags); 3173 s.locked++; 3174 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3175 } 3176 clear_bit(STRIPE_EXPANDING, &sh->state); 3177 } else if (s.expanded) { 3178 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3179 atomic_dec(&conf->reshape_stripes); 3180 wake_up(&conf->wait_for_overlap); 3181 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3182 } 3183 3184 if (s.expanding && s.locked == 0 && 3185 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3186 handle_stripe_expansion(conf, sh, &r6s); 3187 3188 unlock: 3189 spin_unlock(&sh->lock); 3190 3191 /* wait for this device to become unblocked */ 3192 if (unlikely(blocked_rdev)) 3193 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3194 3195 ops_run_io(sh, &s); 3196 3197 return_io(return_bi); 3198 3199 return blocked_rdev == NULL; 3200} 3201 3202/* returns true if the stripe was handled */ 3203static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3204{ 3205 if (sh->raid_conf->level == 6) 3206 return handle_stripe6(sh, tmp_page); 3207 else 3208 return handle_stripe5(sh); 3209} 3210 3211 3212 3213static void raid5_activate_delayed(raid5_conf_t *conf) 3214{ 3215 if (atomic_read(&conf->preread_active_stripes) < 
IO_THRESHOLD) { 3216 while (!list_empty(&conf->delayed_list)) { 3217 struct list_head *l = conf->delayed_list.next; 3218 struct stripe_head *sh; 3219 sh = list_entry(l, struct stripe_head, lru); 3220 list_del_init(l); 3221 clear_bit(STRIPE_DELAYED, &sh->state); 3222 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3223 atomic_inc(&conf->preread_active_stripes); 3224 list_add_tail(&sh->lru, &conf->hold_list); 3225 } 3226 } else 3227 blk_plug_device(conf->mddev->queue); 3228} 3229 3230static void activate_bit_delay(raid5_conf_t *conf) 3231{ 3232 /* device_lock is held */ 3233 struct list_head head; 3234 list_add(&head, &conf->bitmap_list); 3235 list_del_init(&conf->bitmap_list); 3236 while (!list_empty(&head)) { 3237 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3238 list_del_init(&sh->lru); 3239 atomic_inc(&sh->count); 3240 __release_stripe(conf, sh); 3241 } 3242} 3243 3244static void unplug_slaves(mddev_t *mddev) 3245{ 3246 raid5_conf_t *conf = mddev_to_conf(mddev); 3247 int i; 3248 3249 rcu_read_lock(); 3250 for (i=0; i<mddev->raid_disks; i++) { 3251 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3252 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3253 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3254 3255 atomic_inc(&rdev->nr_pending); 3256 rcu_read_unlock(); 3257 3258 blk_unplug(r_queue); 3259 3260 rdev_dec_pending(rdev, mddev); 3261 rcu_read_lock(); 3262 } 3263 } 3264 rcu_read_unlock(); 3265} 3266 3267static void raid5_unplug_device(struct request_queue *q) 3268{ 3269 mddev_t *mddev = q->queuedata; 3270 raid5_conf_t *conf = mddev_to_conf(mddev); 3271 unsigned long flags; 3272 3273 spin_lock_irqsave(&conf->device_lock, flags); 3274 3275 if (blk_remove_plug(q)) { 3276 conf->seq_flush++; 3277 raid5_activate_delayed(conf); 3278 } 3279 md_wakeup_thread(mddev->thread); 3280 3281 spin_unlock_irqrestore(&conf->device_lock, flags); 3282 3283 unplug_slaves(mddev); 3284} 3285 3286static int raid5_congested(void *data, int bits) 3287{ 3288 mddev_t *mddev = data; 3289 raid5_conf_t *conf = mddev_to_conf(mddev); 3290 3291 /* No difference between reads and writes. Just check 3292 * how busy the stripe_cache is 3293 */ 3294 if (conf->inactive_blocked) 3295 return 1; 3296 if (conf->quiesce) 3297 return 1; 3298 if (list_empty_careful(&conf->inactive_list)) 3299 return 1; 3300 3301 return 0; 3302} 3303 3304/* We want read requests to align with chunks where possible, 3305 * but write requests don't need to. 
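 *
 * Worked example (illustrative, assuming 64KiB chunks, i.e.
 * chunk_sectors == 128): a read starting 8 sectors before a chunk
 * boundary has (sector & 127) == 120, so with bio_sectors == 0 the
 * function below allows at most
 *
 *	max = (128 - (120 + 0)) << 9 == 8 sectors == 4096 bytes
 *
 * and the bio can never be grown across the chunk boundary.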
3306 */ 3307static int raid5_mergeable_bvec(struct request_queue *q, 3308 struct bvec_merge_data *bvm, 3309 struct bio_vec *biovec) 3310{ 3311 mddev_t *mddev = q->queuedata; 3312 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3313 int max; 3314 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3315 unsigned int bio_sectors = bvm->bi_size >> 9; 3316 3317 if ((bvm->bi_rw & 1) == WRITE) 3318 return biovec->bv_len; /* always allow writes to be mergeable */ 3319 3320 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3321 if (max < 0) max = 0; 3322 if (max <= biovec->bv_len && bio_sectors == 0) 3323 return biovec->bv_len; 3324 else 3325 return max; 3326} 3327 3328 3329static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3330{ 3331 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3332 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3333 unsigned int bio_sectors = bio->bi_size >> 9; 3334 3335 return chunk_sectors >= 3336 ((sector & (chunk_sectors - 1)) + bio_sectors); 3337} 3338 3339/* 3340 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3341 * later sampled by raid5d. 3342 */ 3343static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3344{ 3345 unsigned long flags; 3346 3347 spin_lock_irqsave(&conf->device_lock, flags); 3348 3349 bi->bi_next = conf->retry_read_aligned_list; 3350 conf->retry_read_aligned_list = bi; 3351 3352 spin_unlock_irqrestore(&conf->device_lock, flags); 3353 md_wakeup_thread(conf->mddev->thread); 3354} 3355 3356 3357static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3358{ 3359 struct bio *bi; 3360 3361 bi = conf->retry_read_aligned; 3362 if (bi) { 3363 conf->retry_read_aligned = NULL; 3364 return bi; 3365 } 3366 bi = conf->retry_read_aligned_list; 3367 if(bi) { 3368 conf->retry_read_aligned_list = bi->bi_next; 3369 bi->bi_next = NULL; 3370 /* 3371 * this sets the active strip count to 1 and the processed 3372 * strip count to zero (upper 8 bits) 3373 */ 3374 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3375 } 3376 3377 return bi; 3378} 3379 3380 3381/* 3382 * The "raid5_align_endio" should check if the read succeeded and if it 3383 * did, call bio_endio on the original bio (having bio_put the new bio 3384 * first). 3385 * If the read failed.. 
3386 */ 3387static void raid5_align_endio(struct bio *bi, int error) 3388{ 3389 struct bio* raid_bi = bi->bi_private; 3390 mddev_t *mddev; 3391 raid5_conf_t *conf; 3392 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3393 mdk_rdev_t *rdev; 3394 3395 bio_put(bi); 3396 3397 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3398 conf = mddev_to_conf(mddev); 3399 rdev = (void*)raid_bi->bi_next; 3400 raid_bi->bi_next = NULL; 3401 3402 rdev_dec_pending(rdev, conf->mddev); 3403 3404 if (!error && uptodate) { 3405 bio_endio(raid_bi, 0); 3406 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3407 wake_up(&conf->wait_for_stripe); 3408 return; 3409 } 3410 3411 3412 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3413 3414 add_bio_to_retry(raid_bi, conf); 3415} 3416 3417static int bio_fits_rdev(struct bio *bi) 3418{ 3419 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3420 3421 if ((bi->bi_size>>9) > q->max_sectors) 3422 return 0; 3423 blk_recount_segments(q, bi); 3424 if (bi->bi_phys_segments > q->max_phys_segments) 3425 return 0; 3426 3427 if (q->merge_bvec_fn) 3428 /* it's too hard to apply the merge_bvec_fn at this stage, 3429 * just just give up 3430 */ 3431 return 0; 3432 3433 return 1; 3434} 3435 3436 3437static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3438{ 3439 mddev_t *mddev = q->queuedata; 3440 raid5_conf_t *conf = mddev_to_conf(mddev); 3441 unsigned int dd_idx; 3442 struct bio* align_bi; 3443 mdk_rdev_t *rdev; 3444 3445 if (!in_chunk_boundary(mddev, raid_bio)) { 3446 pr_debug("chunk_aligned_read : non aligned\n"); 3447 return 0; 3448 } 3449 /* 3450 * use bio_clone to make a copy of the bio 3451 */ 3452 align_bi = bio_clone(raid_bio, GFP_NOIO); 3453 if (!align_bi) 3454 return 0; 3455 /* 3456 * set bi_end_io to a new function, and set bi_private to the 3457 * original bio. 3458 */ 3459 align_bi->bi_end_io = raid5_align_endio; 3460 align_bi->bi_private = raid_bio; 3461 /* 3462 * compute position 3463 */ 3464 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3465 0, 3466 &dd_idx, NULL); 3467 3468 rcu_read_lock(); 3469 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3470 if (rdev && test_bit(In_sync, &rdev->flags)) { 3471 atomic_inc(&rdev->nr_pending); 3472 rcu_read_unlock(); 3473 raid_bio->bi_next = (void*)rdev; 3474 align_bi->bi_bdev = rdev->bdev; 3475 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3476 align_bi->bi_sector += rdev->data_offset; 3477 3478 if (!bio_fits_rdev(align_bi)) { 3479 /* too big in some way */ 3480 bio_put(align_bi); 3481 rdev_dec_pending(rdev, mddev); 3482 return 0; 3483 } 3484 3485 spin_lock_irq(&conf->device_lock); 3486 wait_event_lock_irq(conf->wait_for_stripe, 3487 conf->quiesce == 0, 3488 conf->device_lock, /* nothing */); 3489 atomic_inc(&conf->active_aligned_reads); 3490 spin_unlock_irq(&conf->device_lock); 3491 3492 generic_make_request(align_bi); 3493 return 1; 3494 } else { 3495 rcu_read_unlock(); 3496 bio_put(align_bi); 3497 return 0; 3498 } 3499} 3500 3501/* __get_priority_stripe - get the next stripe to process 3502 * 3503 * Full stripe writes are allowed to pass preread active stripes up until 3504 * the bypass_threshold is exceeded. In general the bypass_count 3505 * increments when the handle_list is handled before the hold_list; however, it 3506 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3507 * stripe with in flight i/o. The bypass_count will be reset when the 3508 * head of the hold_list has changed, i.e. 
the head was promoted to the
3509 * handle_list.
3510 */
3511static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3512{
3513	struct stripe_head *sh;
3514
3515	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3516		  __func__,
3517		  list_empty(&conf->handle_list) ? "empty" : "busy",
3518		  list_empty(&conf->hold_list) ? "empty" : "busy",
3519		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
3520
3521	if (!list_empty(&conf->handle_list)) {
3522		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3523
3524		if (list_empty(&conf->hold_list))
3525			conf->bypass_count = 0;
3526		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3527			if (conf->hold_list.next == conf->last_hold)
3528				conf->bypass_count++;
3529			else {
3530				conf->last_hold = conf->hold_list.next;
3531				conf->bypass_count -= conf->bypass_threshold;
3532				if (conf->bypass_count < 0)
3533					conf->bypass_count = 0;
3534			}
3535		}
3536	} else if (!list_empty(&conf->hold_list) &&
3537		   ((conf->bypass_threshold &&
3538		     conf->bypass_count > conf->bypass_threshold) ||
3539		    atomic_read(&conf->pending_full_writes) == 0)) {
3540		sh = list_entry(conf->hold_list.next,
3541				typeof(*sh), lru);
3542		conf->bypass_count -= conf->bypass_threshold;
3543		if (conf->bypass_count < 0)
3544			conf->bypass_count = 0;
3545	} else
3546		return NULL;
3547
3548	list_del_init(&sh->lru);
3549	atomic_inc(&sh->count);
3550	BUG_ON(atomic_read(&sh->count) != 1);
3551	return sh;
3552}
3553
3554static int make_request(struct request_queue *q, struct bio * bi)
3555{
3556	mddev_t *mddev = q->queuedata;
3557	raid5_conf_t *conf = mddev_to_conf(mddev);
3558	int dd_idx;
3559	sector_t new_sector;
3560	sector_t logical_sector, last_sector;
3561	struct stripe_head *sh;
3562	const int rw = bio_data_dir(bi);
3563	int cpu, remaining;
3564
3565	if (unlikely(bio_barrier(bi))) {
3566		bio_endio(bi, -EOPNOTSUPP);
3567		return 0;
3568	}
3569
3570	md_write_start(mddev, bi);
3571
3572	cpu = part_stat_lock();
3573	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3574	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3575		      bio_sectors(bi));
3576	part_stat_unlock();
3577
3578	if (rw == READ &&
3579	     mddev->reshape_position == MaxSector &&
3580	     chunk_aligned_read(q,bi))
3581		return 0;
3582
3583	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3584	last_sector = bi->bi_sector + (bi->bi_size>>9);
3585	bi->bi_next = NULL;
3586	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
3587
3588	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3589		DEFINE_WAIT(w);
3590		int disks, data_disks;
3591		int previous;
3592
3593	retry:
3594		previous = 0;
3595		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3596		if (likely(conf->reshape_progress == MaxSector))
3597			disks = conf->raid_disks;
3598		else {
3599			/* spinlock is needed as reshape_progress may be
3600			 * 64bit on a 32bit platform, and so it might be
3601			 * possible to see a half-updated value
3602			 * Of course reshape_progress could change after
3603			 * the lock is dropped, so once we get a reference
3604			 * to the stripe that we think it is, we will have
3605			 * to check again.
3606			 */
3607			spin_lock_irq(&conf->device_lock);
3608			disks = conf->raid_disks;
3609			if (mddev->delta_disks < 0
3610			    ? logical_sector < conf->reshape_progress
3611			    : logical_sector >= conf->reshape_progress) {
3612				disks = conf->previous_raid_disks;
3613				previous = 1;
3614			} else {
3615				if (mddev->delta_disks < 0
3616				    ?
logical_sector < conf->reshape_safe
3617				    : logical_sector >= conf->reshape_safe) {
3618					spin_unlock_irq(&conf->device_lock);
3619					schedule();
3620					goto retry;
3621				}
3622			}
3623			spin_unlock_irq(&conf->device_lock);
3624		}
3625		data_disks = disks - conf->max_degraded;
3626
3627		new_sector = raid5_compute_sector(conf, logical_sector,
3628						  previous,
3629						  &dd_idx, NULL);
3630		pr_debug("raid5: make_request, sector %llu logical %llu\n",
3631			(unsigned long long)new_sector,
3632			(unsigned long long)logical_sector);
3633
3634		sh = get_active_stripe(conf, new_sector, previous,
3635				       (bi->bi_rw&RWA_MASK));
3636		if (sh) {
3637			if (unlikely(conf->reshape_progress != MaxSector)) {
3638				/* expansion might have moved on while waiting for a
3639				 * stripe, so we must do the range check again.
3640				 * Expansion could still move past after this
3641				 * test, but as we are holding a reference to
3642				 * 'sh', we know that if that happens,
3643				 * STRIPE_EXPANDING will get set and the expansion
3644				 * won't proceed until we finish with the stripe.
3645				 */
3646				int must_retry = 0;
3647				spin_lock_irq(&conf->device_lock);
3648				if ((mddev->delta_disks < 0
3649				     ? logical_sector >= conf->reshape_progress
3650				     : logical_sector < conf->reshape_progress)
3651				    && disks == conf->previous_raid_disks)
3652					/* mismatch, need to try again */
3653					must_retry = 1;
3654				spin_unlock_irq(&conf->device_lock);
3655				if (must_retry) {
3656					release_stripe(sh);
3657					goto retry;
3658				}
3659			}
3660			/* FIXME what if we get a false positive because these
3661			 * are being updated.
3662			 */
3663			if (logical_sector >= mddev->suspend_lo &&
3664			    logical_sector < mddev->suspend_hi) {
3665				release_stripe(sh);
3666				schedule();
3667				goto retry;
3668			}
3669
3670			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3671			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
3672				/* Stripe is busy expanding or
3673				 * add failed due to overlap. Flush everything
3674				 * and wait a while
3675				 */
3676				raid5_unplug_device(mddev->queue);
3677				release_stripe(sh);
3678				schedule();
3679				goto retry;
3680			}
3681			finish_wait(&conf->wait_for_overlap, &w);
3682			set_bit(STRIPE_HANDLE, &sh->state);
3683			clear_bit(STRIPE_DELAYED, &sh->state);
3684			release_stripe(sh);
3685		} else {
3686			/* cannot get stripe for read-ahead, just give up */
3687			clear_bit(BIO_UPTODATE, &bi->bi_flags);
3688			finish_wait(&conf->wait_for_overlap, &w);
3689			break;
3690		}
3691
3692	}
3693	spin_lock_irq(&conf->device_lock);
3694	remaining = raid5_dec_bi_phys_segments(bi);
3695	spin_unlock_irq(&conf->device_lock);
3696	if (remaining == 0) {
3697
3698		if ( rw == WRITE )
3699			md_write_end(mddev);
3700
3701		bio_endio(bi, 0);
3702	}
3703	return 0;
3704}
3705
3706static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3707
3708static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3709{
3710	/* reshaping is quite different to recovery/resync so it is
3711	 * handled quite separately ... here.
3712	 *
3713	 * On each call to sync_request, we gather one chunk worth of
3714	 * destination stripes and flag them as expanding.
3715	 * Then we find all the source stripes and request reads.
3716	 * As the reads complete, handle_stripe will copy the data
3717	 * into the destination stripe and release that stripe.
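	 *
	 * Illustrative numbers (hypothetical: 64KiB chunks, growing from
	 * 4 to 5 data disks): each pass below covers
	 * chunk_sectors == 128 stripe addresses, so reshape_progress
	 * advances by 128 * new_data_disks == 640 array sectors per call.
	 * writepos and safepos are first reduced to per-device sectors
	 * (sector_div by the new resp. old data-disk count) before being
	 * compared to decide when the superblock must be updated.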
3718 */ 3719 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3720 struct stripe_head *sh; 3721 sector_t first_sector, last_sector; 3722 int raid_disks = conf->previous_raid_disks; 3723 int data_disks = raid_disks - conf->max_degraded; 3724 int new_data_disks = conf->raid_disks - conf->max_degraded; 3725 int i; 3726 int dd_idx; 3727 sector_t writepos, safepos, gap; 3728 sector_t stripe_addr; 3729 3730 if (sector_nr == 0) { 3731 /* If restarting in the middle, skip the initial sectors */ 3732 if (mddev->delta_disks < 0 && 3733 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3734 sector_nr = raid5_size(mddev, 0, 0) 3735 - conf->reshape_progress; 3736 } else if (mddev->delta_disks > 0 && 3737 conf->reshape_progress > 0) 3738 sector_nr = conf->reshape_progress; 3739 sector_div(sector_nr, new_data_disks); 3740 if (sector_nr) { 3741 *skipped = 1; 3742 return sector_nr; 3743 } 3744 } 3745 3746 /* we update the metadata when there is more than 3Meg 3747 * in the block range (that is rather arbitrary, should 3748 * probably be time based) or when the data about to be 3749 * copied would over-write the source of the data at 3750 * the front of the range. 3751 * i.e. one new_stripe along from reshape_progress new_maps 3752 * to after where reshape_safe old_maps to 3753 */ 3754 writepos = conf->reshape_progress; 3755 sector_div(writepos, new_data_disks); 3756 safepos = conf->reshape_safe; 3757 sector_div(safepos, data_disks); 3758 if (mddev->delta_disks < 0) { 3759 writepos -= conf->chunk_size/512; 3760 safepos += conf->chunk_size/512; 3761 gap = conf->reshape_safe - conf->reshape_progress; 3762 } else { 3763 writepos += conf->chunk_size/512; 3764 safepos -= conf->chunk_size/512; 3765 gap = conf->reshape_progress - conf->reshape_safe; 3766 } 3767 3768 if ((mddev->delta_disks < 0 3769 ? writepos < safepos 3770 : writepos > safepos) || 3771 gap > (new_data_disks)*3000*2 /*3Meg*/) { 3772 /* Cannot proceed until we've updated the superblock... 
*/
3773 wait_event(conf->wait_for_overlap,
3774 atomic_read(&conf->reshape_stripes)==0);
3775 mddev->reshape_position = conf->reshape_progress;
3776 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3777 md_wakeup_thread(mddev->thread);
3778 wait_event(mddev->sb_wait, mddev->flags == 0 ||
3779 kthread_should_stop());
3780 spin_lock_irq(&conf->device_lock);
3781 conf->reshape_safe = mddev->reshape_position;
3782 spin_unlock_irq(&conf->device_lock);
3783 wake_up(&conf->wait_for_overlap);
3784 }
3785
3786 if (mddev->delta_disks < 0) {
3787 BUG_ON(conf->reshape_progress == 0);
3788 stripe_addr = writepos;
3789 BUG_ON((mddev->dev_sectors &
3790 ~((sector_t)mddev->chunk_size / 512 - 1))
3791 - (conf->chunk_size / 512) - stripe_addr
3792 != sector_nr);
3793 } else {
3794 BUG_ON(writepos != sector_nr + conf->chunk_size / 512);
3795 stripe_addr = sector_nr;
3796 }
3797 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
3798 int j;
3799 int skipped = 0;
3800 sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
3801 set_bit(STRIPE_EXPANDING, &sh->state);
3802 atomic_inc(&conf->reshape_stripes);
3803 /* If any of this stripe is beyond the end of the old
3804 * array, then we need to zero those blocks
3805 */
3806 for (j=sh->disks; j--;) {
3807 sector_t s;
3808 if (j == sh->pd_idx)
3809 continue;
3810 if (conf->level == 6 &&
3811 j == sh->qd_idx)
3812 continue;
3813 s = compute_blocknr(sh, j);
3814 if (s < raid5_size(mddev, 0, 0)) {
3815 skipped = 1;
3816 continue;
3817 }
3818 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
3819 set_bit(R5_Expanded, &sh->dev[j].flags);
3820 set_bit(R5_UPTODATE, &sh->dev[j].flags);
3821 }
3822 if (!skipped) {
3823 set_bit(STRIPE_EXPAND_READY, &sh->state);
3824 set_bit(STRIPE_HANDLE, &sh->state);
3825 }
3826 release_stripe(sh);
3827 }
3828 spin_lock_irq(&conf->device_lock);
3829 if (mddev->delta_disks < 0)
3830 conf->reshape_progress -= i * new_data_disks;
3831 else
3832 conf->reshape_progress += i * new_data_disks;
3833 spin_unlock_irq(&conf->device_lock);
3834 /* Ok, those stripes are ready. We can start scheduling
3835 * reads on the source stripes.
3836 * The source stripes are determined by mapping the first and last
3837 * block on the destination stripes.
3838 */
3839 first_sector =
3840 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
3841 1, &dd_idx, NULL);
3842 last_sector =
3843 raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
3844 *(new_data_disks) - 1),
3845 1, &dd_idx, NULL);
3846 if (last_sector >= mddev->dev_sectors)
3847 last_sector = mddev->dev_sectors - 1;
3848 while (first_sector <= last_sector) {
3849 sh = get_active_stripe(conf, first_sector, 1, 0);
3850 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3851 set_bit(STRIPE_HANDLE, &sh->state);
3852 release_stripe(sh);
3853 first_sector += STRIPE_SECTORS;
3854 }
3855 /* If this takes us to the resync_max point where we have to pause,
3856 * then we need to write out the superblock.
3857 */
3858 sector_nr += conf->chunk_size>>9;
3859 if (sector_nr >= mddev->resync_max) {
3860 /* Cannot proceed until we've updated the superblock...
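(resync_max is how user-space throttles a reshape into steps, so checkpoint reshape_position before pausing at that boundary)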
*/
3861 wait_event(conf->wait_for_overlap,
3862 atomic_read(&conf->reshape_stripes) == 0);
3863 mddev->reshape_position = conf->reshape_progress;
3864 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3865 md_wakeup_thread(mddev->thread);
3866 wait_event(mddev->sb_wait,
3867 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
3868 || kthread_should_stop());
3869 spin_lock_irq(&conf->device_lock);
3870 conf->reshape_safe = mddev->reshape_position;
3871 spin_unlock_irq(&conf->device_lock);
3872 wake_up(&conf->wait_for_overlap);
3873 }
3874 return conf->chunk_size>>9;
3875}
3876
3877/* FIXME go_faster isn't used */
3878static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
3879{
3880 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3881 struct stripe_head *sh;
3882 sector_t max_sector = mddev->dev_sectors;
3883 int sync_blocks;
3884 int still_degraded = 0;
3885 int i;
3886
3887 if (sector_nr >= max_sector) {
3888 /* just being told to finish up .. nothing much to do */
3889 unplug_slaves(mddev);
3890
3891 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3892 end_reshape(conf);
3893 return 0;
3894 }
3895
3896 if (mddev->curr_resync < max_sector) /* aborted */
3897 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3898 &sync_blocks, 1);
3899 else /* completed sync */
3900 conf->fullsync = 0;
3901 bitmap_close_sync(mddev->bitmap);
3902
3903 return 0;
3904 }
3905
3906 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3907 return reshape_request(mddev, sector_nr, skipped);
3908
3909 /* No need to check resync_max as we never do more than one
3910 * stripe, and as resync_max will always be on a chunk boundary,
3911 * if the check in md_do_sync didn't fire, there is no chance
3912 * of overstepping resync_max here
3913 */
3914
3915 /* if there are too many failed drives and we are trying
3916 * to resync, then assert that we are finished, because there is
3917 * nothing we can do.
3918 */
3919 if (mddev->degraded >= conf->max_degraded &&
3920 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3921 sector_t rv = mddev->dev_sectors - sector_nr;
3922 *skipped = 1;
3923 return rv;
3924 }
3925 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3926 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3927 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
3928 /* we can skip this block, and probably more */
3929 sync_blocks /= STRIPE_SECTORS;
3930 *skipped = 1;
3931 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
3932 }
3933
3934
3935 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3936
3937 sh = get_active_stripe(conf, sector_nr, 0, 1);
3938 if (sh == NULL) {
3939 sh = get_active_stripe(conf, sector_nr, 0, 0);
3940 /* make sure we don't swamp the stripe cache if someone else
3941 * is trying to get access
3942 */
3943 schedule_timeout_uninterruptible(1);
3944 }
3945 /* Need to check if array will still be degraded after recovery/resync
3946 * We don't need to check the 'failed' flag as when that gets set,
3947 * recovery aborts.
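 * still_degraded is passed down to bitmap_start_sync() so that bits
 * covering regions synced while a member was missing are not
 * cleared; those regions are synced again once the disk returns.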
3948 */
3949 for (i=0; i<mddev->raid_disks; i++)
3950 if (conf->disks[i].rdev == NULL)
3951 still_degraded = 1;
3952
3953 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
3954
3955 spin_lock(&sh->lock);
3956 set_bit(STRIPE_SYNCING, &sh->state);
3957 clear_bit(STRIPE_INSYNC, &sh->state);
3958 spin_unlock(&sh->lock);
3959
3960 /* wait for any blocked device to be handled */
3961 while(unlikely(!handle_stripe(sh, NULL)))
3962 ;
3963 release_stripe(sh);
3964
3965 return STRIPE_SECTORS;
3966}
3967
3968static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3969{
3970 /* We may not be able to submit a whole bio at once as there
3971 * may not be enough stripe_heads available.
3972 * We cannot pre-allocate enough stripe_heads as we may need
3973 * more than exist in the cache (if we allow ever larger chunks).
3974 * So we do one stripe head at a time and record in
3975 * ->bi_hw_segments how many have been done.
3976 *
3977 * We *know* that this entire raid_bio is in one chunk, so
3978 * it will hit only one 'dd_idx' and need only one call to raid5_compute_sector.
3979 */
3980 struct stripe_head *sh;
3981 int dd_idx;
3982 sector_t sector, logical_sector, last_sector;
3983 int scnt = 0;
3984 int remaining;
3985 int handled = 0;
3986
3987 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3988 sector = raid5_compute_sector(conf, logical_sector,
3989 0, &dd_idx, NULL);
3990 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
3991
3992 for (; logical_sector < last_sector;
3993 logical_sector += STRIPE_SECTORS,
3994 sector += STRIPE_SECTORS,
3995 scnt++) {
3996
3997 if (scnt < raid5_bi_hw_segments(raid_bio))
3998 /* already done this stripe */
3999 continue;
4000
4001 sh = get_active_stripe(conf, sector, 0, 1);
4002
4003 if (!sh) {
4004 /* failed to get a stripe - must wait */
4005 raid5_set_bi_hw_segments(raid_bio, scnt);
4006 conf->retry_read_aligned = raid_bio;
4007 return handled;
4008 }
4009
4010 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4011 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4012 release_stripe(sh);
4013 raid5_set_bi_hw_segments(raid_bio, scnt);
4014 conf->retry_read_aligned = raid_bio;
4015 return handled;
4016 }
4017
4018 handle_stripe(sh, NULL);
4019 release_stripe(sh);
4020 handled++;
4021 }
4022 spin_lock_irq(&conf->device_lock);
4023 remaining = raid5_dec_bi_phys_segments(raid_bio);
4024 spin_unlock_irq(&conf->device_lock);
4025 if (remaining == 0)
4026 bio_endio(raid_bio, 0);
4027 if (atomic_dec_and_test(&conf->active_aligned_reads))
4028 wake_up(&conf->wait_for_stripe);
4029 return handled;
4030}
4031
4032
4033
4034/*
4035 * This is our raid5 kernel thread.
4036 *
4037 * We scan the hash table for stripes which can be handled now.
4038 * During the scan, completed stripes are saved for us by the interrupt
4039 * handler, so that they will not have to wait for our next wakeup.
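 *
 * Each pass below first flushes the bitmap when a batch of updates
 * has been closed (seq_flush != seq_write), then retries any queued
 * aligned reads, and finally handles stripes returned by
 * __get_priority_stripe(); device_lock is dropped around the actual
 * stripe work.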
4040 */ 4041static void raid5d(mddev_t *mddev) 4042{ 4043 struct stripe_head *sh; 4044 raid5_conf_t *conf = mddev_to_conf(mddev); 4045 int handled; 4046 4047 pr_debug("+++ raid5d active\n"); 4048 4049 md_check_recovery(mddev); 4050 4051 handled = 0; 4052 spin_lock_irq(&conf->device_lock); 4053 while (1) { 4054 struct bio *bio; 4055 4056 if (conf->seq_flush != conf->seq_write) { 4057 int seq = conf->seq_flush; 4058 spin_unlock_irq(&conf->device_lock); 4059 bitmap_unplug(mddev->bitmap); 4060 spin_lock_irq(&conf->device_lock); 4061 conf->seq_write = seq; 4062 activate_bit_delay(conf); 4063 } 4064 4065 while ((bio = remove_bio_from_retry(conf))) { 4066 int ok; 4067 spin_unlock_irq(&conf->device_lock); 4068 ok = retry_aligned_read(conf, bio); 4069 spin_lock_irq(&conf->device_lock); 4070 if (!ok) 4071 break; 4072 handled++; 4073 } 4074 4075 sh = __get_priority_stripe(conf); 4076 4077 if (!sh) 4078 break; 4079 spin_unlock_irq(&conf->device_lock); 4080 4081 handled++; 4082 handle_stripe(sh, conf->spare_page); 4083 release_stripe(sh); 4084 4085 spin_lock_irq(&conf->device_lock); 4086 } 4087 pr_debug("%d stripes handled\n", handled); 4088 4089 spin_unlock_irq(&conf->device_lock); 4090 4091 async_tx_issue_pending_all(); 4092 unplug_slaves(mddev); 4093 4094 pr_debug("--- raid5d inactive\n"); 4095} 4096 4097static ssize_t 4098raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4099{ 4100 raid5_conf_t *conf = mddev_to_conf(mddev); 4101 if (conf) 4102 return sprintf(page, "%d\n", conf->max_nr_stripes); 4103 else 4104 return 0; 4105} 4106 4107static ssize_t 4108raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4109{ 4110 raid5_conf_t *conf = mddev_to_conf(mddev); 4111 unsigned long new; 4112 int err; 4113 4114 if (len >= PAGE_SIZE) 4115 return -EINVAL; 4116 if (!conf) 4117 return -ENODEV; 4118 4119 if (strict_strtoul(page, 10, &new)) 4120 return -EINVAL; 4121 if (new <= 16 || new > 32768) 4122 return -EINVAL; 4123 while (new < conf->max_nr_stripes) { 4124 if (drop_one_stripe(conf)) 4125 conf->max_nr_stripes--; 4126 else 4127 break; 4128 } 4129 err = md_allow_write(mddev); 4130 if (err) 4131 return err; 4132 while (new > conf->max_nr_stripes) { 4133 if (grow_one_stripe(conf)) 4134 conf->max_nr_stripes++; 4135 else break; 4136 } 4137 return len; 4138} 4139 4140static struct md_sysfs_entry 4141raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4142 raid5_show_stripe_cache_size, 4143 raid5_store_stripe_cache_size); 4144 4145static ssize_t 4146raid5_show_preread_threshold(mddev_t *mddev, char *page) 4147{ 4148 raid5_conf_t *conf = mddev_to_conf(mddev); 4149 if (conf) 4150 return sprintf(page, "%d\n", conf->bypass_threshold); 4151 else 4152 return 0; 4153} 4154 4155static ssize_t 4156raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4157{ 4158 raid5_conf_t *conf = mddev_to_conf(mddev); 4159 unsigned long new; 4160 if (len >= PAGE_SIZE) 4161 return -EINVAL; 4162 if (!conf) 4163 return -ENODEV; 4164 4165 if (strict_strtoul(page, 10, &new)) 4166 return -EINVAL; 4167 if (new > conf->max_nr_stripes) 4168 return -EINVAL; 4169 conf->bypass_threshold = new; 4170 return len; 4171} 4172 4173static struct md_sysfs_entry 4174raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4175 S_IRUGO | S_IWUSR, 4176 raid5_show_preread_threshold, 4177 raid5_store_preread_threshold); 4178 4179static ssize_t 4180stripe_cache_active_show(mddev_t *mddev, char *page) 4181{ 4182 raid5_conf_t *conf = mddev_to_conf(mddev); 4183 if (conf) 4184 return 
sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4185 else 4186 return 0; 4187} 4188 4189static struct md_sysfs_entry 4190raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4191 4192static struct attribute *raid5_attrs[] = { 4193 &raid5_stripecache_size.attr, 4194 &raid5_stripecache_active.attr, 4195 &raid5_preread_bypass_threshold.attr, 4196 NULL, 4197}; 4198static struct attribute_group raid5_attrs_group = { 4199 .name = NULL, 4200 .attrs = raid5_attrs, 4201}; 4202 4203static sector_t 4204raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4205{ 4206 raid5_conf_t *conf = mddev_to_conf(mddev); 4207 4208 if (!sectors) 4209 sectors = mddev->dev_sectors; 4210 if (!raid_disks) { 4211 /* size is defined by the smallest of previous and new size */ 4212 if (conf->raid_disks < conf->previous_raid_disks) 4213 raid_disks = conf->raid_disks; 4214 else 4215 raid_disks = conf->previous_raid_disks; 4216 } 4217 4218 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4219 return sectors * (raid_disks - conf->max_degraded); 4220} 4221 4222static raid5_conf_t *setup_conf(mddev_t *mddev) 4223{ 4224 raid5_conf_t *conf; 4225 int raid_disk, memory; 4226 mdk_rdev_t *rdev; 4227 struct disk_info *disk; 4228 4229 if (mddev->new_level != 5 4230 && mddev->new_level != 4 4231 && mddev->new_level != 6) { 4232 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4233 mdname(mddev), mddev->new_level); 4234 return ERR_PTR(-EIO); 4235 } 4236 if ((mddev->new_level == 5 4237 && !algorithm_valid_raid5(mddev->new_layout)) || 4238 (mddev->new_level == 6 4239 && !algorithm_valid_raid6(mddev->new_layout))) { 4240 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4241 mdname(mddev), mddev->new_layout); 4242 return ERR_PTR(-EIO); 4243 } 4244 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4245 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4246 mdname(mddev), mddev->raid_disks); 4247 return ERR_PTR(-EINVAL); 4248 } 4249 4250 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { 4251 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4252 mddev->new_chunk, mdname(mddev)); 4253 return ERR_PTR(-EINVAL); 4254 } 4255 4256 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4257 if (conf == NULL) 4258 goto abort; 4259 4260 conf->raid_disks = mddev->raid_disks; 4261 if (mddev->reshape_position == MaxSector) 4262 conf->previous_raid_disks = mddev->raid_disks; 4263 else 4264 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4265 4266 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4267 GFP_KERNEL); 4268 if (!conf->disks) 4269 goto abort; 4270 4271 conf->mddev = mddev; 4272 4273 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4274 goto abort; 4275 4276 if (mddev->new_level == 6) { 4277 conf->spare_page = alloc_page(GFP_KERNEL); 4278 if (!conf->spare_page) 4279 goto abort; 4280 } 4281 spin_lock_init(&conf->device_lock); 4282 init_waitqueue_head(&conf->wait_for_stripe); 4283 init_waitqueue_head(&conf->wait_for_overlap); 4284 INIT_LIST_HEAD(&conf->handle_list); 4285 INIT_LIST_HEAD(&conf->hold_list); 4286 INIT_LIST_HEAD(&conf->delayed_list); 4287 INIT_LIST_HEAD(&conf->bitmap_list); 4288 INIT_LIST_HEAD(&conf->inactive_list); 4289 atomic_set(&conf->active_stripes, 0); 4290 atomic_set(&conf->preread_active_stripes, 0); 4291 atomic_set(&conf->active_aligned_reads, 0); 4292 conf->bypass_threshold = BYPASS_THRESHOLD; 4293 4294 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4295 4296 
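 /* Bind each member rdev to its slot in conf->disks[]; any member
 * that is not In_sync forces fullsync, as the bitmap alone cannot
 * be relied on to complete its recovery. */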
list_for_each_entry(rdev, &mddev->disks, same_set) { 4297 raid_disk = rdev->raid_disk; 4298 if (raid_disk >= conf->raid_disks 4299 || raid_disk < 0) 4300 continue; 4301 disk = conf->disks + raid_disk; 4302 4303 disk->rdev = rdev; 4304 4305 if (test_bit(In_sync, &rdev->flags)) { 4306 char b[BDEVNAME_SIZE]; 4307 printk(KERN_INFO "raid5: device %s operational as raid" 4308 " disk %d\n", bdevname(rdev->bdev,b), 4309 raid_disk); 4310 } else 4311 /* Cannot rely on bitmap to complete recovery */ 4312 conf->fullsync = 1; 4313 } 4314 4315 conf->chunk_size = mddev->new_chunk; 4316 conf->level = mddev->new_level; 4317 if (conf->level == 6) 4318 conf->max_degraded = 2; 4319 else 4320 conf->max_degraded = 1; 4321 conf->algorithm = mddev->new_layout; 4322 conf->max_nr_stripes = NR_STRIPES; 4323 conf->reshape_progress = mddev->reshape_position; 4324 4325 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4326 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4327 if (grow_stripes(conf, conf->max_nr_stripes)) { 4328 printk(KERN_ERR 4329 "raid5: couldn't allocate %dkB for buffers\n", memory); 4330 goto abort; 4331 } else 4332 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4333 memory, mdname(mddev)); 4334 4335 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4336 if (!conf->thread) { 4337 printk(KERN_ERR 4338 "raid5: couldn't allocate thread for %s\n", 4339 mdname(mddev)); 4340 goto abort; 4341 } 4342 4343 return conf; 4344 4345 abort: 4346 if (conf) { 4347 shrink_stripes(conf); 4348 safe_put_page(conf->spare_page); 4349 kfree(conf->disks); 4350 kfree(conf->stripe_hashtbl); 4351 kfree(conf); 4352 return ERR_PTR(-EIO); 4353 } else 4354 return ERR_PTR(-ENOMEM); 4355} 4356 4357static int run(mddev_t *mddev) 4358{ 4359 raid5_conf_t *conf; 4360 int working_disks = 0; 4361 mdk_rdev_t *rdev; 4362 4363 if (mddev->reshape_position != MaxSector) { 4364 /* Check that we can continue the reshape. 4365 * Currently only disks can change, it must 4366 * increase, and we must be past the point where 4367 * a stripe over-writes itself 4368 */ 4369 sector_t here_new, here_old; 4370 int old_disks; 4371 int max_degraded = (mddev->level == 6 ? 2 : 1); 4372 4373 if (mddev->new_level != mddev->level || 4374 mddev->new_layout != mddev->layout || 4375 mddev->new_chunk != mddev->chunk_size) { 4376 printk(KERN_ERR "raid5: %s: unsupported reshape " 4377 "required - aborting.\n", 4378 mdname(mddev)); 4379 return -EINVAL; 4380 } 4381 old_disks = mddev->raid_disks - mddev->delta_disks; 4382 /* reshape_position must be on a new-stripe boundary, and one 4383 * further up in new geometry must map after here in old 4384 * geometry. 
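 * e.g. (illustrative numbers) growing 4 -> 5 devices with 64KiB
 * chunks and max_degraded == 1: a new-geometry stripe covers
 * 128*4 == 512 sectors and an old one 128*3 == 384, so
 * reshape_position == 1536 gives here_new == 3 and here_old == 4;
 * reading then stays safely ahead of writing and the reshape may
 * continue.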
4385 */
4386 here_new = mddev->reshape_position;
4387 if (sector_div(here_new, (mddev->chunk_size>>9)*
4388 (mddev->raid_disks - max_degraded))) {
4389 printk(KERN_ERR "raid5: reshape_position not "
4390 "on a stripe boundary\n");
4391 return -EINVAL;
4392 }
4393 /* here_new is the stripe we will write to */
4394 here_old = mddev->reshape_position;
4395 sector_div(here_old, (mddev->chunk_size>>9)*
4396 (old_disks-max_degraded));
4397 /* here_old is the first stripe that we might need to read
4398 * from */
4399 if (here_new >= here_old) {
4400 /* Reading from the same stripe as writing to - bad */
4401 printk(KERN_ERR "raid5: reshape_position too early for "
4402 "auto-recovery - aborting.\n");
4403 return -EINVAL;
4404 }
4405 printk(KERN_INFO "raid5: reshape will continue\n");
4406 /* OK, we should be able to continue. */
4407 } else {
4408 BUG_ON(mddev->level != mddev->new_level);
4409 BUG_ON(mddev->layout != mddev->new_layout);
4410 BUG_ON(mddev->chunk_size != mddev->new_chunk);
4411 BUG_ON(mddev->delta_disks != 0);
4412 }
4413
4414 if (mddev->private == NULL)
4415 conf = setup_conf(mddev);
4416 else
4417 conf = mddev->private;
4418
4419 if (IS_ERR(conf))
4420 return PTR_ERR(conf);
4421
4422 mddev->thread = conf->thread;
4423 conf->thread = NULL;
4424 mddev->private = conf;
4425
4426 /*
4427 * 0 for a fully functional array, 1 or 2 for a degraded array.
4428 */
4429 list_for_each_entry(rdev, &mddev->disks, same_set)
4430 if (rdev->raid_disk >= 0 &&
4431 test_bit(In_sync, &rdev->flags))
4432 working_disks++;
4433
4434 mddev->degraded = conf->raid_disks - working_disks;
4435
4436 if (mddev->degraded > conf->max_degraded) {
4437 printk(KERN_ERR "raid5: not enough operational devices for %s"
4438 " (%d/%d failed)\n",
4439 mdname(mddev), mddev->degraded, conf->raid_disks);
4440 goto abort;
4441 }
4442
4443 /* device size must be a multiple of chunk size */
4444 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4445 mddev->resync_max_sectors = mddev->dev_sectors;
4446
4447 if (mddev->degraded > 0 &&
4448 mddev->recovery_cp != MaxSector) {
4449 if (mddev->ok_start_degraded)
4450 printk(KERN_WARNING
4451 "raid5: starting dirty degraded array: %s"
4452 "- data corruption possible.\n",
4453 mdname(mddev));
4454 else {
4455 printk(KERN_ERR
4456 "raid5: cannot start dirty degraded array for %s\n",
4457 mdname(mddev));
4458 goto abort;
4459 }
4460 }
4461
4462 if (mddev->degraded == 0)
4463 printk("raid5: raid level %d set %s active with %d out of %d"
4464 " devices, algorithm %d\n", conf->level, mdname(mddev),
4465 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4466 conf->algorithm);
4467 else
4468 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4469 " out of %d devices, algorithm %d\n", conf->level,
4470 mdname(mddev), mddev->raid_disks - mddev->degraded,
4471 mddev->raid_disks, conf->algorithm);
4472
4473 print_raid5_conf(conf);
4474
4475 if (conf->reshape_progress != MaxSector) {
4476 printk("...ok start reshape thread\n");
4477 conf->reshape_safe = conf->reshape_progress;
4478 atomic_set(&conf->reshape_stripes, 0);
4479 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4480 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4481 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4482 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4483 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4484 "%s_reshape");
4485 }
4486
4487 /* read-ahead size must cover two whole stripes, which is
4488 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices
4489 */
4490 {
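 /* e.g. 64KiB chunks on 5 data disks with 4KiB pages:
 * stripe = 5 * (65536/4096) == 80 pages, so ra_pages is raised
 * to at least 160 pages (640KiB). */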
4491 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4492 int stripe = data_disks * 4493 (mddev->chunk_size / PAGE_SIZE); 4494 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4495 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4496 } 4497 4498 /* Ok, everything is just fine now */ 4499 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4500 printk(KERN_WARNING 4501 "raid5: failed to create sysfs attributes for %s\n", 4502 mdname(mddev)); 4503 4504 mddev->queue->queue_lock = &conf->device_lock; 4505 4506 mddev->queue->unplug_fn = raid5_unplug_device; 4507 mddev->queue->backing_dev_info.congested_data = mddev; 4508 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4509 4510 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4511 4512 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4513 4514 return 0; 4515abort: 4516 md_unregister_thread(mddev->thread); 4517 mddev->thread = NULL; 4518 if (conf) { 4519 shrink_stripes(conf); 4520 print_raid5_conf(conf); 4521 safe_put_page(conf->spare_page); 4522 kfree(conf->disks); 4523 kfree(conf->stripe_hashtbl); 4524 kfree(conf); 4525 } 4526 mddev->private = NULL; 4527 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4528 return -EIO; 4529} 4530 4531 4532 4533static int stop(mddev_t *mddev) 4534{ 4535 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4536 4537 md_unregister_thread(mddev->thread); 4538 mddev->thread = NULL; 4539 shrink_stripes(conf); 4540 kfree(conf->stripe_hashtbl); 4541 mddev->queue->backing_dev_info.congested_fn = NULL; 4542 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4543 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4544 kfree(conf->disks); 4545 kfree(conf); 4546 mddev->private = NULL; 4547 return 0; 4548} 4549 4550#ifdef DEBUG 4551static void print_sh(struct seq_file *seq, struct stripe_head *sh) 4552{ 4553 int i; 4554 4555 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4556 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4557 seq_printf(seq, "sh %llu, count %d.\n", 4558 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4559 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4560 for (i = 0; i < sh->disks; i++) { 4561 seq_printf(seq, "(cache%d: %p %ld) ", 4562 i, sh->dev[i].page, sh->dev[i].flags); 4563 } 4564 seq_printf(seq, "\n"); 4565} 4566 4567static void printall(struct seq_file *seq, raid5_conf_t *conf) 4568{ 4569 struct stripe_head *sh; 4570 struct hlist_node *hn; 4571 int i; 4572 4573 spin_lock_irq(&conf->device_lock); 4574 for (i = 0; i < NR_HASH; i++) { 4575 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4576 if (sh->raid_conf != conf) 4577 continue; 4578 print_sh(seq, sh); 4579 } 4580 } 4581 spin_unlock_irq(&conf->device_lock); 4582} 4583#endif 4584 4585static void status(struct seq_file *seq, mddev_t *mddev) 4586{ 4587 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4588 int i; 4589 4590 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4591 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4592 for (i = 0; i < conf->raid_disks; i++) 4593 seq_printf (seq, "%s", 4594 conf->disks[i].rdev && 4595 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4596 seq_printf (seq, "]"); 4597#ifdef DEBUG 4598 seq_printf (seq, "\n"); 4599 printall(seq, conf); 4600#endif 4601} 4602 4603static void print_raid5_conf (raid5_conf_t *conf) 4604{ 4605 int i; 4606 struct disk_info *tmp; 4607 4608 printk("RAID5 conf printout:\n"); 4609 if (!conf) { 4610 printk("(conf==NULL)\n"); 4611 return; 4612 } 4613 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4614 conf->raid_disks - conf->mddev->degraded); 4615 4616 for (i = 0; i < conf->raid_disks; i++) { 4617 char b[BDEVNAME_SIZE]; 4618 tmp = conf->disks + i; 4619 if (tmp->rdev) 4620 printk(" disk %d, o:%d, dev:%s\n", 4621 i, !test_bit(Faulty, &tmp->rdev->flags), 4622 bdevname(tmp->rdev->bdev,b)); 4623 } 4624} 4625 4626static int raid5_spare_active(mddev_t *mddev) 4627{ 4628 int i; 4629 raid5_conf_t *conf = mddev->private; 4630 struct disk_info *tmp; 4631 4632 for (i = 0; i < conf->raid_disks; i++) { 4633 tmp = conf->disks + i; 4634 if (tmp->rdev 4635 && !test_bit(Faulty, &tmp->rdev->flags) 4636 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4637 unsigned long flags; 4638 spin_lock_irqsave(&conf->device_lock, flags); 4639 mddev->degraded--; 4640 spin_unlock_irqrestore(&conf->device_lock, flags); 4641 } 4642 } 4643 print_raid5_conf(conf); 4644 return 0; 4645} 4646 4647static int raid5_remove_disk(mddev_t *mddev, int number) 4648{ 4649 raid5_conf_t *conf = mddev->private; 4650 int err = 0; 4651 mdk_rdev_t *rdev; 4652 struct disk_info *p = conf->disks + number; 4653 4654 print_raid5_conf(conf); 4655 rdev = p->rdev; 4656 if (rdev) { 4657 if (number >= conf->raid_disks && 4658 conf->reshape_progress == MaxSector) 4659 clear_bit(In_sync, &rdev->flags); 4660 4661 if (test_bit(In_sync, &rdev->flags) || 4662 atomic_read(&rdev->nr_pending)) { 4663 err = -EBUSY; 4664 goto abort; 4665 } 4666 /* Only remove non-faulty devices if recovery 4667 * isn't possible. 4668 */ 4669 if (!test_bit(Faulty, &rdev->flags) && 4670 mddev->degraded <= conf->max_degraded && 4671 number < conf->raid_disks) { 4672 err = -EBUSY; 4673 goto abort; 4674 } 4675 p->rdev = NULL; 4676 synchronize_rcu(); 4677 if (atomic_read(&rdev->nr_pending)) { 4678 /* lost the race, try later */ 4679 err = -EBUSY; 4680 p->rdev = rdev; 4681 } 4682 } 4683abort: 4684 4685 print_raid5_conf(conf); 4686 return err; 4687} 4688 4689static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4690{ 4691 raid5_conf_t *conf = mddev->private; 4692 int err = -EEXIST; 4693 int disk; 4694 struct disk_info *p; 4695 int first = 0; 4696 int last = conf->raid_disks - 1; 4697 4698 if (mddev->degraded > conf->max_degraded) 4699 /* no point adding a device */ 4700 return -EINVAL; 4701 4702 if (rdev->raid_disk >= 0) 4703 first = last = rdev->raid_disk; 4704 4705 /* 4706 * find the disk ... but prefer rdev->saved_raid_disk 4707 * if possible. 4708 */ 4709 if (rdev->saved_raid_disk >= 0 && 4710 rdev->saved_raid_disk >= first && 4711 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4712 disk = rdev->saved_raid_disk; 4713 else 4714 disk = first; 4715 for ( ; disk <= last ; disk++) 4716 if ((p=conf->disks + disk)->rdev == NULL) { 4717 clear_bit(In_sync, &rdev->flags); 4718 rdev->raid_disk = disk; 4719 err = 0; 4720 if (rdev->saved_raid_disk != disk) 4721 conf->fullsync = 1; 4722 rcu_assign_pointer(p->rdev, rdev); 4723 break; 4724 } 4725 print_raid5_conf(conf); 4726 return err; 4727} 4728 4729static int raid5_resize(mddev_t *mddev, sector_t sectors) 4730{ 4731 /* no resync is happening, and there is enough space 4732 * on all devices, so we can resize. 
* We need to make sure resync covers any new space.
4734 * If the array is shrinking we should possibly wait until
4735 * any io in the removed space completes, but it hardly seems
4736 * worth it.
4737 */
4738 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
4739 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
4740 mddev->raid_disks));
4741 if (mddev->array_sectors >
4742 raid5_size(mddev, sectors, mddev->raid_disks))
4743 return -EINVAL;
4744 set_capacity(mddev->gendisk, mddev->array_sectors);
4745 mddev->changed = 1;
4746 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
4747 mddev->recovery_cp = mddev->dev_sectors;
4748 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4749 }
4750 mddev->dev_sectors = sectors;
4751 mddev->resync_max_sectors = sectors;
4752 return 0;
4753}
4754
4755#ifdef CONFIG_MD_RAID5_RESHAPE
4756static int raid5_check_reshape(mddev_t *mddev)
4757{
4758 raid5_conf_t *conf = mddev_to_conf(mddev);
4759
4760 if (mddev->delta_disks == 0)
4761 return 0; /* nothing to do */
4762 if (mddev->bitmap)
4763 /* Cannot grow a bitmap yet */
4764 return -EBUSY;
4765 if (mddev->degraded > conf->max_degraded)
4766 return -EINVAL;
4767 if (mddev->delta_disks < 0) {
4768 /* We might be able to shrink, but the devices must
4769 * be made bigger first.
4770 * For raid6, 4 is the minimum number of devices.
4771 * Otherwise 2 is the minimum.
4772 */
4773 int min = 2;
4774 if (mddev->level == 6)
4775 min = 4;
4776 if (mddev->raid_disks + mddev->delta_disks < min)
4777 return -EINVAL;
4778 }
4779
4780 /* Can only proceed if there are plenty of stripe_heads.
4781 * We need a minimum of one full stripe, and for sensible progress
4782 * it is best to have about 4 times that.
4783 * If we require 4 times, then the default 256 4K stripe_heads will
4784 * allow for chunk sizes up to 256K, which is probably OK.
4785 * If the chunk size is greater, user-space should request more
4786 * stripe_heads first.
4787 */
4788 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
4789 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
4790 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4791 (mddev->chunk_size / STRIPE_SIZE)*4);
4792 return -ENOSPC;
4793 }
4794
4795 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
4796}
4797
4798static int raid5_start_reshape(mddev_t *mddev)
4799{
4800 raid5_conf_t *conf = mddev_to_conf(mddev);
4801 mdk_rdev_t *rdev;
4802 int spares = 0;
4803 int added_devices = 0;
4804 unsigned long flags;
4805
4806 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4807 return -EBUSY;
4808
4809 list_for_each_entry(rdev, &mddev->disks, same_set)
4810 if (rdev->raid_disk < 0 &&
4811 !test_bit(Faulty, &rdev->flags))
4812 spares++;
4813
4814 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
4815 /* Not enough devices even to make a degraded array
4816 * of that size
4817 */
4818 return -EINVAL;
4819
4820 /* Refuse to reduce size of the array. Any reductions in
4821 * array size must be through explicit setting of array_size
4822 * attribute.
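(i.e. first shrink the data area, e.g. via the sysfs array_size attribute, then remove disks; the check below compares the post-reshape capacity with the current array_sectors)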
4823 */ 4824 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 4825 < mddev->array_sectors) { 4826 printk(KERN_ERR "md: %s: array size must be reduced " 4827 "before number of disks\n", mdname(mddev)); 4828 return -EINVAL; 4829 } 4830 4831 atomic_set(&conf->reshape_stripes, 0); 4832 spin_lock_irq(&conf->device_lock); 4833 conf->previous_raid_disks = conf->raid_disks; 4834 conf->raid_disks += mddev->delta_disks; 4835 if (mddev->delta_disks < 0) 4836 conf->reshape_progress = raid5_size(mddev, 0, 0); 4837 else 4838 conf->reshape_progress = 0; 4839 conf->reshape_safe = conf->reshape_progress; 4840 spin_unlock_irq(&conf->device_lock); 4841 4842 /* Add some new drives, as many as will fit. 4843 * We know there are enough to make the newly sized array work. 4844 */ 4845 list_for_each_entry(rdev, &mddev->disks, same_set) 4846 if (rdev->raid_disk < 0 && 4847 !test_bit(Faulty, &rdev->flags)) { 4848 if (raid5_add_disk(mddev, rdev) == 0) { 4849 char nm[20]; 4850 set_bit(In_sync, &rdev->flags); 4851 added_devices++; 4852 rdev->recovery_offset = 0; 4853 sprintf(nm, "rd%d", rdev->raid_disk); 4854 if (sysfs_create_link(&mddev->kobj, 4855 &rdev->kobj, nm)) 4856 printk(KERN_WARNING 4857 "raid5: failed to create " 4858 " link %s for %s\n", 4859 nm, mdname(mddev)); 4860 } else 4861 break; 4862 } 4863 4864 if (mddev->delta_disks > 0) { 4865 spin_lock_irqsave(&conf->device_lock, flags); 4866 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 4867 - added_devices; 4868 spin_unlock_irqrestore(&conf->device_lock, flags); 4869 } 4870 mddev->raid_disks = conf->raid_disks; 4871 mddev->reshape_position = 0; 4872 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4873 4874 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4875 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4876 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4877 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4878 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4879 "%s_reshape"); 4880 if (!mddev->sync_thread) { 4881 mddev->recovery = 0; 4882 spin_lock_irq(&conf->device_lock); 4883 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4884 conf->reshape_progress = MaxSector; 4885 spin_unlock_irq(&conf->device_lock); 4886 return -EAGAIN; 4887 } 4888 md_wakeup_thread(mddev->sync_thread); 4889 md_new_event(mddev); 4890 return 0; 4891} 4892#endif 4893 4894/* This is called from the reshape thread and should make any 4895 * changes needed in 'conf' 4896 */ 4897static void end_reshape(raid5_conf_t *conf) 4898{ 4899 4900 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4901 4902 spin_lock_irq(&conf->device_lock); 4903 conf->previous_raid_disks = conf->raid_disks; 4904 conf->reshape_progress = MaxSector; 4905 spin_unlock_irq(&conf->device_lock); 4906 4907 /* read-ahead size must cover two whole stripes, which is 4908 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4909 */ 4910 { 4911 int data_disks = conf->raid_disks - conf->max_degraded; 4912 int stripe = data_disks * (conf->chunk_size 4913 / PAGE_SIZE); 4914 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4915 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4916 } 4917 } 4918} 4919 4920/* This is called from the raid5d thread with mddev_lock held. 4921 * It makes config changes to the device. 
4922 */ 4923static void raid5_finish_reshape(mddev_t *mddev) 4924{ 4925 struct block_device *bdev; 4926 4927 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4928 4929 if (mddev->delta_disks > 0) { 4930 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4931 set_capacity(mddev->gendisk, mddev->array_sectors); 4932 mddev->changed = 1; 4933 4934 bdev = bdget_disk(mddev->gendisk, 0); 4935 if (bdev) { 4936 mutex_lock(&bdev->bd_inode->i_mutex); 4937 i_size_write(bdev->bd_inode, 4938 (loff_t)mddev->array_sectors << 9); 4939 mutex_unlock(&bdev->bd_inode->i_mutex); 4940 bdput(bdev); 4941 } 4942 } else { 4943 int d; 4944 raid5_conf_t *conf = mddev_to_conf(mddev); 4945 mddev->degraded = conf->raid_disks; 4946 for (d = 0; d < conf->raid_disks ; d++) 4947 if (conf->disks[d].rdev && 4948 test_bit(In_sync, 4949 &conf->disks[d].rdev->flags)) 4950 mddev->degraded--; 4951 for (d = conf->raid_disks ; 4952 d < conf->raid_disks - mddev->delta_disks; 4953 d++) 4954 raid5_remove_disk(mddev, d); 4955 } 4956 mddev->reshape_position = MaxSector; 4957 mddev->delta_disks = 0; 4958 } 4959} 4960 4961static void raid5_quiesce(mddev_t *mddev, int state) 4962{ 4963 raid5_conf_t *conf = mddev_to_conf(mddev); 4964 4965 switch(state) { 4966 case 2: /* resume for a suspend */ 4967 wake_up(&conf->wait_for_overlap); 4968 break; 4969 4970 case 1: /* stop all writes */ 4971 spin_lock_irq(&conf->device_lock); 4972 conf->quiesce = 1; 4973 wait_event_lock_irq(conf->wait_for_stripe, 4974 atomic_read(&conf->active_stripes) == 0 && 4975 atomic_read(&conf->active_aligned_reads) == 0, 4976 conf->device_lock, /* nothing */); 4977 spin_unlock_irq(&conf->device_lock); 4978 break; 4979 4980 case 0: /* re-enable writes */ 4981 spin_lock_irq(&conf->device_lock); 4982 conf->quiesce = 0; 4983 wake_up(&conf->wait_for_stripe); 4984 wake_up(&conf->wait_for_overlap); 4985 spin_unlock_irq(&conf->device_lock); 4986 break; 4987 } 4988} 4989 4990 4991static void *raid5_takeover_raid1(mddev_t *mddev) 4992{ 4993 int chunksect; 4994 4995 if (mddev->raid_disks != 2 || 4996 mddev->degraded > 1) 4997 return ERR_PTR(-EINVAL); 4998 4999 /* Should check if there are write-behind devices? 
*/
5000
5001 chunksect = 64*2; /* 64K by default */
5002
5003 /* The array must be an exact multiple of chunksize */
5004 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5005 chunksect >>= 1;
5006
5007 if ((chunksect<<9) < STRIPE_SIZE)
5008 /* array size does not allow a suitable chunk size */
5009 return ERR_PTR(-EINVAL);
5010
5011 mddev->new_level = 5;
5012 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5013 mddev->new_chunk = chunksect << 9;
5014
5015 return setup_conf(mddev);
5016}
5017
5018static void *raid5_takeover_raid6(mddev_t *mddev)
5019{
5020 int new_layout;
5021
5022 switch (mddev->layout) {
5023 case ALGORITHM_LEFT_ASYMMETRIC_6:
5024 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5025 break;
5026 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5027 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5028 break;
5029 case ALGORITHM_LEFT_SYMMETRIC_6:
5030 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5031 break;
5032 case ALGORITHM_RIGHT_SYMMETRIC_6:
5033 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5034 break;
5035 case ALGORITHM_PARITY_0_6:
5036 new_layout = ALGORITHM_PARITY_0;
5037 break;
5038 case ALGORITHM_PARITY_N:
5039 new_layout = ALGORITHM_PARITY_N;
5040 break;
5041 default:
5042 return ERR_PTR(-EINVAL);
5043 }
5044 mddev->new_level = 5;
5045 mddev->new_layout = new_layout;
5046 mddev->delta_disks = -1;
5047 mddev->raid_disks -= 1;
5048 return setup_conf(mddev);
5049}
5050
5051
5052static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5053{
5054 /* Currently the layout and chunk size can only be changed
5055 * for a 2-drive raid array, as in that case no data shuffling
5056 * is required.
5057 * Later we might validate these and set new_* so a reshape
5058 * can complete the change.
5059 */
5060 raid5_conf_t *conf = mddev_to_conf(mddev);
5061
5062 if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
5063 return -EINVAL;
5064 if (new_chunk > 0) {
5065 if (new_chunk & (new_chunk-1))
5066 /* not a power of 2 */
5067 return -EINVAL;
5068 if (new_chunk < PAGE_SIZE)
5069 return -EINVAL;
5070 if (mddev->array_sectors & ((new_chunk>>9)-1))
5071 /* not factor of array size */
5072 return -EINVAL;
5073 }
5074
5075 /* They look valid */
5076
5077 if (mddev->raid_disks != 2)
5078 return -EINVAL;
5079
5080 if (new_layout >= 0) {
5081 conf->algorithm = new_layout;
5082 mddev->layout = mddev->new_layout = new_layout;
5083 }
5084 if (new_chunk > 0) {
5085 conf->chunk_size = new_chunk;
5086 mddev->chunk_size = mddev->new_chunk = new_chunk;
5087 }
5088 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5089 md_wakeup_thread(mddev->thread);
5090 return 0;
5091}
5092
5093static void *raid5_takeover(mddev_t *mddev)
5094{
5095 /* raid5 can take over:
5096 * raid0 - if all devices are the same - make it a raid4 layout
5097 * raid1 - if there are two drives. We need to know the chunk size
5098 * raid4 - trivial - just use a raid4 layout.
5099 * raid6 - Providing it is a *_6 layout
5100 *
5101 * raid0 is not handled yet; the other three are handled below.
5102 */
5103
5104 if (mddev->level == 1)
5105 return raid5_takeover_raid1(mddev);
5106 if (mddev->level == 4) {
5107 mddev->new_layout = ALGORITHM_PARITY_N;
5108 mddev->new_level = 5;
5109 return setup_conf(mddev);
5110 }
5111 if (mddev->level == 6)
5112 return raid5_takeover_raid6(mddev);
5113
5114 return ERR_PTR(-EINVAL);
5115}
5116
5117
5118static struct mdk_personality raid5_personality;
5119
5120static void *raid6_takeover(mddev_t *mddev)
5121{
5122 /* Currently can only take over a raid5. We map the
5123 * personality to an equivalent raid6 personality
5124 * with the Q block at the end.
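 * The *_6 layouts keep the raid5 data and P blocks exactly where
 * they are and place Q on an extra last device, so no existing
 * blocks move; only the new Q blocks need to be computed.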
5125 */ 5126 int new_layout; 5127 5128 if (mddev->pers != &raid5_personality) 5129 return ERR_PTR(-EINVAL); 5130 if (mddev->degraded > 1) 5131 return ERR_PTR(-EINVAL); 5132 if (mddev->raid_disks > 253) 5133 return ERR_PTR(-EINVAL); 5134 if (mddev->raid_disks < 3) 5135 return ERR_PTR(-EINVAL); 5136 5137 switch (mddev->layout) { 5138 case ALGORITHM_LEFT_ASYMMETRIC: 5139 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5140 break; 5141 case ALGORITHM_RIGHT_ASYMMETRIC: 5142 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5143 break; 5144 case ALGORITHM_LEFT_SYMMETRIC: 5145 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5146 break; 5147 case ALGORITHM_RIGHT_SYMMETRIC: 5148 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5149 break; 5150 case ALGORITHM_PARITY_0: 5151 new_layout = ALGORITHM_PARITY_0_6; 5152 break; 5153 case ALGORITHM_PARITY_N: 5154 new_layout = ALGORITHM_PARITY_N; 5155 break; 5156 default: 5157 return ERR_PTR(-EINVAL); 5158 } 5159 mddev->new_level = 6; 5160 mddev->new_layout = new_layout; 5161 mddev->delta_disks = 1; 5162 mddev->raid_disks += 1; 5163 return setup_conf(mddev); 5164} 5165 5166 5167static struct mdk_personality raid6_personality = 5168{ 5169 .name = "raid6", 5170 .level = 6, 5171 .owner = THIS_MODULE, 5172 .make_request = make_request, 5173 .run = run, 5174 .stop = stop, 5175 .status = status, 5176 .error_handler = error, 5177 .hot_add_disk = raid5_add_disk, 5178 .hot_remove_disk= raid5_remove_disk, 5179 .spare_active = raid5_spare_active, 5180 .sync_request = sync_request, 5181 .resize = raid5_resize, 5182 .size = raid5_size, 5183#ifdef CONFIG_MD_RAID5_RESHAPE 5184 .check_reshape = raid5_check_reshape, 5185 .start_reshape = raid5_start_reshape, 5186 .finish_reshape = raid5_finish_reshape, 5187#endif 5188 .quiesce = raid5_quiesce, 5189 .takeover = raid6_takeover, 5190}; 5191static struct mdk_personality raid5_personality = 5192{ 5193 .name = "raid5", 5194 .level = 5, 5195 .owner = THIS_MODULE, 5196 .make_request = make_request, 5197 .run = run, 5198 .stop = stop, 5199 .status = status, 5200 .error_handler = error, 5201 .hot_add_disk = raid5_add_disk, 5202 .hot_remove_disk= raid5_remove_disk, 5203 .spare_active = raid5_spare_active, 5204 .sync_request = sync_request, 5205 .resize = raid5_resize, 5206 .size = raid5_size, 5207#ifdef CONFIG_MD_RAID5_RESHAPE 5208 .check_reshape = raid5_check_reshape, 5209 .start_reshape = raid5_start_reshape, 5210 .finish_reshape = raid5_finish_reshape, 5211#endif 5212 .quiesce = raid5_quiesce, 5213 .takeover = raid5_takeover, 5214 .reconfig = raid5_reconfig, 5215}; 5216 5217static struct mdk_personality raid4_personality = 5218{ 5219 .name = "raid4", 5220 .level = 4, 5221 .owner = THIS_MODULE, 5222 .make_request = make_request, 5223 .run = run, 5224 .stop = stop, 5225 .status = status, 5226 .error_handler = error, 5227 .hot_add_disk = raid5_add_disk, 5228 .hot_remove_disk= raid5_remove_disk, 5229 .spare_active = raid5_spare_active, 5230 .sync_request = sync_request, 5231 .resize = raid5_resize, 5232 .size = raid5_size, 5233#ifdef CONFIG_MD_RAID5_RESHAPE 5234 .check_reshape = raid5_check_reshape, 5235 .start_reshape = raid5_start_reshape, 5236 .finish_reshape = raid5_finish_reshape, 5237#endif 5238 .quiesce = raid5_quiesce, 5239}; 5240 5241static int __init raid5_init(void) 5242{ 5243 register_md_personality(&raid6_personality); 5244 register_md_personality(&raid5_personality); 5245 register_md_personality(&raid4_personality); 5246 return 0; 5247} 5248 5249static void raid5_exit(void) 5250{ 5251 unregister_md_personality(&raid6_personality); 5252 
unregister_md_personality(&raid5_personality); 5253 unregister_md_personality(&raid4_personality); 5254} 5255 5256module_init(raid5_init); 5257module_exit(raid5_exit); 5258MODULE_LICENSE("GPL"); 5259MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5260MODULE_ALIAS("md-raid5"); 5261MODULE_ALIAS("md-raid4"); 5262MODULE_ALIAS("md-level-5"); 5263MODULE_ALIAS("md-level-4"); 5264MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5265MODULE_ALIAS("md-raid6"); 5266MODULE_ALIAS("md-level-6"); 5267 5268/* This used to be two separate modules, they were: */ 5269MODULE_ALIAS("raid5"); 5270MODULE_ALIAS("raid6"); 5271