raid5.c revision 5fd6c1dce06ec24ef3de20fe0c7ecf2ba9fe5ef9
/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <asm/atomic.h>
#include "raid6.h"

#include <linux/raid/bitmap.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap. There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
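/*
 * Worked example (editor's illustration, not part of the original
 * driver): with 4KB pages, STRIPE_SECTORS is 8, so a stripe+device
 * covers sectors [sect, sect+8).  For a bio at bi_sector == sect with
 * bi_size 2048 (4 sectors), sect+4 < sect+8 holds and r5_next_bio()
 * yields bi_next; for a bio that runs up to (or past) sect+8 the test
 * fails and the walk stops, since anything chained after it may belong
 * to a different stripe+device.
 */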
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
#if RAID5_DEBUG
#define inline
#define __inline__
#endif

#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

static void print_raid5_conf (raid5_conf_t *conf);

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 conf->seq_write == sh->bm_seq)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block (struct stripe_head *sh, int i);

static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	CHECK_DEVLOCK();
	PRINTK("init_stripe called, stripe %llu\n",
	       (unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;

	sh->disks = disks;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk("sector=%llx i=%d %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->disks == disks)
			return sh;
	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
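/*
 * Typical use of the stripe cache (editor's summary of the code around
 * here, not original text): a caller obtains a stripe through
 * get_active_stripe(), which either finds it in the hash table, pulls a
 * free one off inactive_list and runs init_stripe() on it, or blocks
 * until one frees up.  Every successful get must be paired with a
 * release_stripe(), which drops the reference and, via
 * __release_stripe(), re-queues the stripe on the appropriate list.
 */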
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);

static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, disks);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    unplug_slaves(conf->mddev)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx, disks);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
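	 *
	 * (Editor's note, spelling out the reasoning rather than original
	 * text: GFP_KERNEL allocations may block until memory is reclaimed,
	 * and reclaim can mean writing dirty pages to this very array --
	 * which is frozen while we hold all of its stripe_heads.  GFP_NOIO
	 * forbids the allocator from starting I/O, avoiding that deadlock.)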
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	kmem_cache_t *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
#endif

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				  int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
	       (unsigned long long)sh->sector, i, atomic_read(&sh->count),
	       uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	if (uptodate) {
#if 0
		struct bio *bio;
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		/* we can return a buffer if we bypassed the cache or
		 * if the top buffer is not in highmem.  If there are
		 * multiple buffers, leave the extra work to
		 * handle_stripe
		 */
		buffer = sh->bh_read[i];
		if (buffer &&
		    (!PageHighMem(buffer->b_page)
		     || buffer->b_page == bh->b_page )
			) {
			sh->bh_read[i] = buffer->b_reqnext;
			buffer->b_reqnext = NULL;
		} else
			buffer = NULL;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (sh->bh_page[i]==bh->b_page)
			set_buffer_uptodate(bh);
		if (buffer) {
			if (buffer->b_page != bh->b_page)
				memcpy(buffer->b_data, bh->b_data, bh->b_size);
			buffer->b_end_io(buffer, 1);
		}
#else
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			printk(KERN_INFO "raid5: read error corrected!!\n");
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		int retry = 0;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&conf->disks[i].rdev->read_errors);
		if (conf->mddev->degraded)
			printk(KERN_WARNING "raid5: read error not correctable.\n");
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
		else if (atomic_read(&conf->disks[i].rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5: Too many read errors, failing device.\n");
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, conf->disks[i].rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
	/* must restore b_page before unlocking buffer... */
	if (sh->bh_page[i] != bh->b_page) {
		bh->b_page = sh->bh_page[i];
		bh->b_data = page_address(bh->b_page);
		clear_buffer_uptodate(bh);
	}
#endif
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
	return 0;
}

static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
	       (unsigned long long)sh->sector, i, atomic_read(&sh->count),
	       uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	PRINTK("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk (KERN_ALERT
			"raid5: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
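 *
 * Worked example (editor's illustration, not original text): take a
 * 4-disk RAID5 with 64KB chunks (sectors_per_chunk = 128) and the
 * ALGORITHM_LEFT_SYMMETRIC layout.  For r_sector = 800:
 * chunk_offset = 800 % 128 = 32, chunk_number = 6, stripe = 6/3 = 2,
 * dd_idx = 6 % 3 = 0, then pd_idx = 3 - (2 % 4) = 1 and
 * dd_idx = (1 + 1 + 0) % 4 = 2, so the block lives on disk 2 at device
 * sector 2*128 + 32 = 288, with parity on disk 1.  compute_blocknr()
 * below performs the inverse of this mapping.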
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
				     unsigned int data_disks, unsigned int * dd_idx,
				     unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	switch(conf->level) {
	case 4:
		*pd_idx = data_disks;
		break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
		}
		break;
	case 6:

		/**** FIX THIS ****/
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
			if (*pd_idx == raid_disks-1)
				(*dd_idx)++;	/* Q D D D P */
			else if (*dd_idx >= *pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*pd_idx == raid_disks-1)
				(*dd_idx)++;	/* Q D D D P */
			else if (*dd_idx >= *pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		default:
			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
				conf->algorithm);
		}
		break;
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks, data_disks = raid_disks - 1;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;


	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
		}
		break;
	case 6:
		data_disks = raid_disks - 2;
		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
			return 0; /* It is the Q disk */
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		default:
			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
				conf->algorithm);
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
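/*
 * (Editor's note, derived from the code above rather than original
 * text: compute_blocknr() undoes raid5_compute_sector() -- continuing
 * the worked example, disk 2, device sector 288, pd_idx 1 maps back to
 * array sector 800 -- and the final recomputation through
 * raid5_compute_sector() is a built-in consistency check on the pair
 * of mappings.)
 */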


/*
 * Copy data between a page in the stripe cache, and one or more bion
 * The page could align with the middle of the bio, or there could be
 * several bion, each with several bio_vecs, which cover part of the page
 * Multiple bion are linked together on bi_next.  There may be extras
 * at the end of this list.  We ignore them.
 */
static void copy_data(int frombio, struct bio *bio,
		      struct page *page,
		      sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}
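/*
 * Worked example (editor's illustration, not original text): a bio
 * starting 2 sectors before this stripe page gives an initial
 * page_offset of -1024.  If its first bio_vec holds 4096 bytes, then
 * b_offset becomes 1024, len drops to 3072, and clen = 3072 bytes are
 * copied to (or from) the start of the cache page; a segment that
 * would run past STRIPE_SIZE is clipped the same way at the far end.
 */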
#define check_xor()	do {						\
				if (count == MAX_XOR_BLOCKS) {		\
					xor_block(count, STRIPE_SIZE, ptr); \
					count = 1;			\
				}					\
			} while(0)


static void compute_block(struct stripe_head *sh, int dd_idx)
{
	int i, count, disks = sh->disks;
	void *ptr[MAX_XOR_BLOCKS], *p;

	PRINTK("compute_block, stripe %llu, idx %d\n",
	       (unsigned long long)sh->sector, dd_idx);

	ptr[0] = page_address(sh->dev[dd_idx].page);
	memset(ptr[0], 0, STRIPE_SIZE);
	count = 1;
	for (i = disks ; i--; ) {
		if (i == dd_idx)
			continue;
		p = page_address(sh->dev[i].page);
		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
			ptr[count++] = p;
		else
			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
			       " not present\n", dd_idx,
			       (unsigned long long)sh->sector, i);

		check_xor();
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}

static void compute_parity5(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
	void *ptr[MAX_XOR_BLOCKS];
	struct bio *chosen;

	PRINTK("compute_parity5, stripe %llu, method %d\n",
	       (unsigned long long)sh->sector, method);

	count = 1;
	ptr[0] = page_address(sh->dev[pd_idx].page);
	switch(method) {
	case READ_MODIFY_WRITE:
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
		for (i=disks ; i-- ;) {
			if (i==pd_idx)
				continue;
			if (sh->dev[i].towrite &&
			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
				ptr[count++] = page_address(sh->dev[i].page);
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
				check_xor();
			}
		}
		break;
	case RECONSTRUCT_WRITE:
		memset(ptr[0], 0, STRIPE_SIZE);
		for (i= disks; i-- ;)
			if (i!=pd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		break;
	}
	if (count>1) {
		xor_block(count, STRIPE_SIZE, ptr);
		count = 1;
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	switch(method) {
	case RECONSTRUCT_WRITE:
	case CHECK_PARITY:
		for (i=disks; i--;)
			if (i != pd_idx) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
		break;
	case READ_MODIFY_WRITE:
		for (i = disks; i--;)
			if (sh->dev[i].written) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);

	if (method != CHECK_PARITY) {
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
	} else
		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}
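/*
 * (Editor's note on the two write methods, not original text: for a
 * read-modify-write the new parity is old-parity ^ old-data ^ new-data,
 * so only the blocks being rewritten plus the parity block must be up
 * to date; for a reconstruct-write the parity is recomputed from every
 * data block in the stripe.  On a 5-disk array, updating one data block
 * costs two pre-reads via RMW but three via RCW, while updating three
 * of the four data blocks costs four via RMW but only one via RCW.
 * handle_stripe5() below counts both and picks the cheaper.)
 */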
static void compute_parity6(struct stripe_head *sh, int method)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[disks];

	qd_idx = raid6_next_disk(pd_idx, disks);
	d0_idx = raid6_next_disk(qd_idx, disks);

	PRINTK("compute_parity, stripe %llu, method %d\n",
	       (unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		for (i= disks; i-- ;)
			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

//	switch(method) {
//	case RECONSTRUCT_WRITE:
//	case CHECK_PARITY:
//	case UPDATE_PARITY:
		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
		/* FIX: Is this ordering of drives even remotely optimal? */
		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("block %d/%d not uptodate on parity calc\n", i,count);
			i = raid6_next_disk(i, disks);
		} while ( i != d0_idx );
//	break;
//	}

	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}
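/*
 * (Editor's note, not original text: because d0_idx immediately follows
 * the Q disk in rotation, walking forward from d0_idx fills ptrs[] with
 * the data pages in layout order and leaves the P and Q pages as the
 * last two entries -- the layout gen_syndrome() expects -- which is why
 * the ordering above matters where RAID-5's plain XOR did not.)
 */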

/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	void *ptr[MAX_XOR_BLOCKS], *p;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);

	PRINTK("compute_block_1, stripe %llu, idx %d\n",
	       (unsigned long long)sh->sector, dd_idx);

	if ( dd_idx == qd_idx ) {
		/* We're actually computing the Q drive */
		compute_parity6(sh, UPDATE_PARITY);
	} else {
		ptr[0] = page_address(sh->dev[dd_idx].page);
		if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
		count = 1;
		for (i = disks ; i--; ) {
			if (i == dd_idx || i == qd_idx)
				continue;
			p = page_address(sh->dev[i].page);
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
				ptr[count++] = p;
			else
				printk("compute_block() %d, stripe %llu, %d"
				       " not present\n", dd_idx,
				       (unsigned long long)sh->sector, i);

			check_xor();
		}
		if (count != 1)
			xor_block(count, STRIPE_SIZE, ptr);
		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
	}
}

/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);
	int d0_idx = raid6_next_disk(qd_idx, disks);
	int faila, failb;

	/* faila and failb are disk numbers relative to d0_idx */
	/* pd_idx becomes disks-2 and qd_idx becomes disks-1 */
	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;

	BUG_ON(faila == failb);
	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }

	PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
	       (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);

	if ( failb == disks-1 ) {
		/* Q disk is one of the missing disks */
		if ( faila == disks-2 ) {
			/* Missing P+Q, just recompute */
			compute_parity6(sh, UPDATE_PARITY);
			return;
		} else {
			/* We're missing D+Q; recompute D from P */
			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
			return;
		}
	}

	/* We're missing D+P or D+D; build pointer table */
	{
		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
		void *ptrs[disks];

		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			i = raid6_next_disk(i, disks);
			if (i != dd_idx1 && i != dd_idx2 &&
			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("compute_2 with missing block %d/%d\n", count, i);
		} while ( i != d0_idx );

		if ( failb == disks-2 ) {
			/* We're missing D+P. */
			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
		} else {
			/* We're missing D+D. */
			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
		}

		/* Both the above update both missing blocks */
		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
	}
}



/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
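 * Overlapping requests are not allowed in the chain: if a new bio
 * overlaps an existing one, add_stripe_bio() below sets R5_Overlap and
 * returns 0, and the caller waits on wait_for_overlap before retrying
 * (editor's summary of the code below, not original text).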
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
	       (unsigned long long)bi->bi_sector,
	       (unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments ++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
	       (unsigned long long)bi->bi_sector,
	       (unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		sh->bm_seq = conf->seq_write;
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}

static void end_reshape(raid5_conf_t *conf);

static int page_is_zero(struct page *p)
{
	char *a = page_address(p);
	return ((*(u32*)a) == 0 &&
		memcmp(a, a+4, STRIPE_SIZE-4)==0);
}

static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
{
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x = stripe;
	int pd_idx, dd_idx;
	int chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
			     + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
	return pd_idx;
}

/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock;
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing, expanding, expanded;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num=0;
	struct r5dev *dev;

	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
	       (unsigned long long)sh->sector, atomic_read(&sh->count),
	       sh->pd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
		       i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;


		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
			failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();
	PRINTK("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d\n",
	       locked, uptodate, to_read, to_write, failed, failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev;
				rcu_read_lock();
				rdev = rcu_dereference(conf->disks[i].rdev);
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
				rcu_read_unlock();
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 1 && syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( written &&
	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
		test_bit(R5_UPTODATE, &dev->flags))
	       || (failed == 1 && failed_num == sh->pd_idx))
		) {
		/* any written block on an uptodate or failed drive can be returned.
		 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
		 * never LOCKED, so we don't need to test 'failed' directly.
1521 */ 1522 for (i=disks; i--; ) 1523 if (sh->dev[i].written) { 1524 dev = &sh->dev[i]; 1525 if (!test_bit(R5_LOCKED, &dev->flags) && 1526 test_bit(R5_UPTODATE, &dev->flags) ) { 1527 /* We can return any write requests */ 1528 struct bio *wbi, *wbi2; 1529 int bitmap_end = 0; 1530 PRINTK("Return write for disc %d\n", i); 1531 spin_lock_irq(&conf->device_lock); 1532 wbi = dev->written; 1533 dev->written = NULL; 1534 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1535 wbi2 = r5_next_bio(wbi, dev->sector); 1536 if (--wbi->bi_phys_segments == 0) { 1537 md_write_end(conf->mddev); 1538 wbi->bi_next = return_bi; 1539 return_bi = wbi; 1540 } 1541 wbi = wbi2; 1542 } 1543 if (dev->towrite == NULL) 1544 bitmap_end = 1; 1545 spin_unlock_irq(&conf->device_lock); 1546 if (bitmap_end) 1547 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1548 STRIPE_SECTORS, 1549 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 1550 } 1551 } 1552 } 1553 1554 /* Now we might consider reading some blocks, either to check/generate 1555 * parity, or to satisfy requests 1556 * or to load a block that is being partially written. 1557 */ 1558 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) { 1559 for (i=disks; i--;) { 1560 dev = &sh->dev[i]; 1561 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1562 (dev->toread || 1563 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1564 syncing || 1565 expanding || 1566 (failed && (sh->dev[failed_num].toread || 1567 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) 1568 ) 1569 ) { 1570 /* we would like to get this block, possibly 1571 * by computing it, but we might not be able to 1572 */ 1573 if (uptodate == disks-1) { 1574 PRINTK("Computing block %d\n", i); 1575 compute_block(sh, i); 1576 uptodate++; 1577 } else if (test_bit(R5_Insync, &dev->flags)) { 1578 set_bit(R5_LOCKED, &dev->flags); 1579 set_bit(R5_Wantread, &dev->flags); 1580#if 0 1581 /* if I am just reading this block and we don't have 1582 a failed drive, or any pending writes then sidestep the cache */ 1583 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 1584 ! 

	/* now to consider writing and what else, if anything should be read */
	if (to_write) {
		int rmw=0, rcw=0;
		for (i=disks ; i--;) {
			/* would I have to read this buffer for read_modify_write */
			dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
			     || sh->bh_page[i]!=bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)
/*				    && !(!mddev->insync && i == sh->pd_idx) */
					)
					rmw++;
				else rmw += 2*disks;  /* cannot read it */
			}
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
			     || sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else rcw += 2*disks;
			}
		}
		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
		       (unsigned long long)sh->sector, rmw, rcw);
		set_bit(STRIPE_HANDLE, &sh->state);
		if (rmw < rcw && rmw > 0)
			/* prefer read-modify-write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if ((dev->towrite || i == sh->pd_idx) &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for r-m-w\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		if (rcw <= rmw && rcw > 0)
			/* want reconstruct write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for Reconstruct\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		/* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			PRINTK("Computing parity...\n");
			compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
			/* now every locked buffer is ready to be written */
			for (i=disks; i--;)
				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
					PRINTK("Writing block %d\n", i);
					locked++;
					set_bit(R5_Wantwrite, &sh->dev[i].flags);
					if (!test_bit(R5_Insync, &sh->dev[i].flags)
					    || (i==sh->pd_idx && failed == 0))
						set_bit(STRIPE_INSYNC, &sh->state);
				}
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough data
	 * is available
	 */
	if (syncing && locked == 0 &&
	    !test_bit(STRIPE_INSYNC, &sh->state)) {
		set_bit(STRIPE_HANDLE, &sh->state);
		if (failed == 0) {
			BUG_ON(uptodate != disks);
			compute_parity5(sh, CHECK_PARITY);
			uptodate--;
			if (page_is_zero(sh->dev[sh->pd_idx].page)) {
				/* parity is correct (on disc, not in buffer any more) */
				set_bit(STRIPE_INSYNC, &sh->state);
			} else {
				conf->mddev->resync_mismatches += STRIPE_SECTORS;
				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
					/* don't try to repair!! */
					set_bit(STRIPE_INSYNC, &sh->state);
				else {
					compute_block(sh, sh->pd_idx);
					uptodate++;
				}
			}
		}
		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
			/* either failed parity check, or recovery is happening */
			if (failed==0)
				failed_num = sh->pd_idx;
			dev = &sh->dev[failed_num];
			BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
			BUG_ON(uptodate != disks);

			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
			clear_bit(STRIPE_DEGRADED, &sh->state);
			locked++;
			set_bit(STRIPE_INSYNC, &sh->state);
		}
	}
	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}
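	/* (Editor's note, not original text: CHECK_PARITY xors all the data
	 * blocks into the parity page in place, so on a consistent stripe
	 * the page ends up all-zero -- exactly what page_is_zero() tests
	 * above.  A non-zero result is a mismatch, counted in
	 * resync_mismatches and repaired unless MD_RECOVERY_CHECK is set.)
	 */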

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
		) {
		dev = &sh->dev[failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			locked++;
		}
	}

	if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
		compute_parity5(sh, RECONSTRUCT_WRITE);
		for (i= conf->raid_disks; i--;) {
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			locked++;
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
		}
		clear_bit(STRIPE_EXPANDING, &sh->state);
	} else if (expanded) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (expanding && locked == 0) {
		/* We have read all the blocks in this stripe and now we need to
		 * copy some of them into a target stripe for expand.
		 */
		clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		for (i=0; i< sh->disks; i++)
			if (i != sh->pd_idx) {
				int dd_idx, pd_idx, j;
				struct stripe_head *sh2;

				sector_t bn = compute_blocknr(sh, i);
				sector_t s = raid5_compute_sector(bn, conf->raid_disks,
								  conf->raid_disks-1,
								  &dd_idx, &pd_idx, conf);
				sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
				if (sh2 == NULL)
					/* so far only the early blocks of this stripe
					 * have been requested.  When later blocks
					 * get requested, we will try again
					 */
					continue;
				if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
				   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
					/* must have already done this block */
					release_stripe(sh2);
					continue;
				}
				memcpy(page_address(sh2->dev[dd_idx].page),
				       page_address(sh->dev[i].page),
				       STRIPE_SIZE);
				set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
				set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
				for (j=0; j<conf->raid_disks; j++)
					if (j != sh2->pd_idx &&
					    !test_bit(R5_Expanded, &sh2->dev[j].flags))
						break;
				if (j == conf->raid_disks) {
					set_bit(STRIPE_EXPAND_READY, &sh2->state);
					set_bit(STRIPE_HANDLE, &sh2->state);
				}
				release_stripe(sh2);
			}
	}

	spin_unlock(&sh->lock);

	while ((bi=return_bi)) {
		int bytes = bi->bi_size;

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	for (i=disks; i-- ;) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = 1;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = 0;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (syncing || expanding || expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			PRINTK("for %llu schedule op %ld on disc %d\n",
			       (unsigned long long)sh->sector, bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
			       bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
	raid6_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num[2] = {0, 0};
	struct r5dev *dev, *pdev, *qdev;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);
	int p_failed, q_failed;

	PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
	       (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
	       pd_idx, qd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
&sh->state); 1915 1916 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1917 /* Now to look around and see what can be done */ 1918 1919 rcu_read_lock(); 1920 for (i=disks; i--; ) { 1921 mdk_rdev_t *rdev; 1922 dev = &sh->dev[i]; 1923 clear_bit(R5_Insync, &dev->flags); 1924 1925 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1926 i, dev->flags, dev->toread, dev->towrite, dev->written); 1927 /* maybe we can reply to a read */ 1928 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 1929 struct bio *rbi, *rbi2; 1930 PRINTK("Return read for disc %d\n", i); 1931 spin_lock_irq(&conf->device_lock); 1932 rbi = dev->toread; 1933 dev->toread = NULL; 1934 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1935 wake_up(&conf->wait_for_overlap); 1936 spin_unlock_irq(&conf->device_lock); 1937 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1938 copy_data(0, rbi, dev->page, dev->sector); 1939 rbi2 = r5_next_bio(rbi, dev->sector); 1940 spin_lock_irq(&conf->device_lock); 1941 if (--rbi->bi_phys_segments == 0) { 1942 rbi->bi_next = return_bi; 1943 return_bi = rbi; 1944 } 1945 spin_unlock_irq(&conf->device_lock); 1946 rbi = rbi2; 1947 } 1948 } 1949 1950 /* now count some things */ 1951 if (test_bit(R5_LOCKED, &dev->flags)) locked++; 1952 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; 1953 1954 1955 if (dev->toread) to_read++; 1956 if (dev->towrite) { 1957 to_write++; 1958 if (!test_bit(R5_OVERWRITE, &dev->flags)) 1959 non_overwrite++; 1960 } 1961 if (dev->written) written++; 1962 rdev = rcu_dereference(conf->disks[i].rdev); 1963 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1964 /* The ReadError flag will just be confusing now */ 1965 clear_bit(R5_ReadError, &dev->flags); 1966 clear_bit(R5_ReWrite, &dev->flags); 1967 } 1968 if (!rdev || !test_bit(In_sync, &rdev->flags) 1969 || test_bit(R5_ReadError, &dev->flags)) { 1970 if ( failed < 2 ) 1971 failed_num[failed] = i; 1972 failed++; 1973 } else 1974 set_bit(R5_Insync, &dev->flags); 1975 } 1976 rcu_read_unlock(); 1977 PRINTK("locked=%d uptodate=%d to_read=%d" 1978 " to_write=%d failed=%d failed_num=%d,%d\n", 1979 locked, uptodate, to_read, to_write, failed, 1980 failed_num[0], failed_num[1]); 1981 /* check if the array has lost >2 devices and, if so, some requests might 1982 * need to be failed 1983 */ 1984 if (failed > 2 && to_read+to_write+written) { 1985 for (i=disks; i--; ) { 1986 int bitmap_end = 0; 1987 1988 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1989 mdk_rdev_t *rdev; 1990 rcu_read_lock(); 1991 rdev = rcu_dereference(conf->disks[i].rdev); 1992 if (rdev && test_bit(In_sync, &rdev->flags)) 1993 /* multiple read failures in one stripe */ 1994 md_error(conf->mddev, rdev); 1995 rcu_read_unlock(); 1996 } 1997 1998 spin_lock_irq(&conf->device_lock); 1999 /* fail all writes first */ 2000 bi = sh->dev[i].towrite; 2001 sh->dev[i].towrite = NULL; 2002 if (bi) { to_write--; bitmap_end = 1; } 2003 2004 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2005 wake_up(&conf->wait_for_overlap); 2006 2007 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 2008 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2009 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2010 if (--bi->bi_phys_segments == 0) { 2011 md_write_end(conf->mddev); 2012 bi->bi_next = return_bi; 2013 return_bi = bi; 2014 } 2015 bi = nextbi; 2016 } 2017 /* and fail all 'written' */ 2018 bi = sh->dev[i].written; 2019 sh->dev[i].written = NULL; 2020 if (bi) bitmap_end = 1; 2021 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) 
{ 2022 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2023 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2024 if (--bi->bi_phys_segments == 0) { 2025 md_write_end(conf->mddev); 2026 bi->bi_next = return_bi; 2027 return_bi = bi; 2028 } 2029 bi = bi2; 2030 } 2031 2032 /* fail any reads if this device is non-operational */ 2033 if (!test_bit(R5_Insync, &sh->dev[i].flags) || 2034 test_bit(R5_ReadError, &sh->dev[i].flags)) { 2035 bi = sh->dev[i].toread; 2036 sh->dev[i].toread = NULL; 2037 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2038 wake_up(&conf->wait_for_overlap); 2039 if (bi) to_read--; 2040 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 2041 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2042 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2043 if (--bi->bi_phys_segments == 0) { 2044 bi->bi_next = return_bi; 2045 return_bi = bi; 2046 } 2047 bi = nextbi; 2048 } 2049 } 2050 spin_unlock_irq(&conf->device_lock); 2051 if (bitmap_end) 2052 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2053 STRIPE_SECTORS, 0, 0); 2054 } 2055 } 2056 if (failed > 2 && syncing) { 2057 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2058 clear_bit(STRIPE_SYNCING, &sh->state); 2059 syncing = 0; 2060 } 2061 2062 /* 2063 * might be able to return some write requests if the parity blocks 2064 * are safe, or on a failed drive 2065 */ 2066 pdev = &sh->dev[pd_idx]; 2067 p_failed = (failed >= 1 && failed_num[0] == pd_idx) 2068 || (failed >= 2 && failed_num[1] == pd_idx); 2069 qdev = &sh->dev[qd_idx]; 2070 q_failed = (failed >= 1 && failed_num[0] == qd_idx) 2071 || (failed >= 2 && failed_num[1] == qd_idx); 2072 2073 if ( written && 2074 ( p_failed || ((test_bit(R5_Insync, &pdev->flags) 2075 && !test_bit(R5_LOCKED, &pdev->flags) 2076 && test_bit(R5_UPTODATE, &pdev->flags))) ) && 2077 ( q_failed || ((test_bit(R5_Insync, &qdev->flags) 2078 && !test_bit(R5_LOCKED, &qdev->flags) 2079 && test_bit(R5_UPTODATE, &qdev->flags))) ) ) { 2080 /* any written block on an uptodate or failed drive can be 2081 * returned. Note that if we 'wrote' to a failed drive, 2082 * it will be UPTODATE, but never LOCKED, so we don't need 2083 * to test 'failed' directly. 2084 */ 2085 for (i=disks; i--; ) 2086 if (sh->dev[i].written) { 2087 dev = &sh->dev[i]; 2088 if (!test_bit(R5_LOCKED, &dev->flags) && 2089 test_bit(R5_UPTODATE, &dev->flags) ) { 2090 /* We can return any write requests */ 2091 int bitmap_end = 0; 2092 struct bio *wbi, *wbi2; 2093 PRINTK("Return write for stripe %llu disc %d\n", 2094 (unsigned long long)sh->sector, i); 2095 spin_lock_irq(&conf->device_lock); 2096 wbi = dev->written; 2097 dev->written = NULL; 2098 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2099 wbi2 = r5_next_bio(wbi, dev->sector); 2100 if (--wbi->bi_phys_segments == 0) { 2101 md_write_end(conf->mddev); 2102 wbi->bi_next = return_bi; 2103 return_bi = wbi; 2104 } 2105 wbi = wbi2; 2106 } 2107 if (dev->towrite == NULL) 2108 bitmap_end = 1; 2109 spin_unlock_irq(&conf->device_lock); 2110 if (bitmap_end) 2111 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2112 STRIPE_SECTORS, 2113 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 2114 } 2115 } 2116 } 2117 2118 /* Now we might consider reading some blocks, either to check/generate 2119 * parity, or to satisfy requests 2120 * or to load a block that is being partially written. 
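 * A wanted block is either read from an in-sync device or, when
 * enough of the other blocks are up to date, computed: uptodate ==
 * disks-1 needs a single compute_block_1(), while uptodate ==
 * disks-2 takes the much more expensive compute_block_2() and is
 * only attempted when two devices have actually failed.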
2121 */ 2122 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) { 2123 for (i=disks; i--;) { 2124 dev = &sh->dev[i]; 2125 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 2126 (dev->toread || 2127 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2128 syncing || 2129 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) || 2130 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write)) 2131 ) 2132 ) { 2133 /* we would like to get this block, possibly 2134 * by computing it, but we might not be able to 2135 */ 2136 if (uptodate == disks-1) { 2137 PRINTK("Computing stripe %llu block %d\n", 2138 (unsigned long long)sh->sector, i); 2139 compute_block_1(sh, i, 0); 2140 uptodate++; 2141 } else if ( uptodate == disks-2 && failed >= 2 ) { 2142 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ 2143 int other; 2144 for (other=disks; other--;) { 2145 if ( other == i ) 2146 continue; 2147 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) ) 2148 break; 2149 } 2150 BUG_ON(other < 0); 2151 PRINTK("Computing stripe %llu blocks %d,%d\n", 2152 (unsigned long long)sh->sector, i, other); 2153 compute_block_2(sh, i, other); 2154 uptodate += 2; 2155 } else if (test_bit(R5_Insync, &dev->flags)) { 2156 set_bit(R5_LOCKED, &dev->flags); 2157 set_bit(R5_Wantread, &dev->flags); 2158#if 0 2159 /* if I am just reading this block and we don't have 2160 a failed drive, or any pending writes then sidestep the cache */ 2161 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 2162 ! syncing && !failed && !to_write) { 2163 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; 2164 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; 2165 } 2166#endif 2167 locked++; 2168 PRINTK("Reading block %d (sync=%d)\n", 2169 i, syncing); 2170 } 2171 } 2172 } 2173 set_bit(STRIPE_HANDLE, &sh->state); 2174 } 2175 2176 /* now to consider writing and what else, if anything should be read */ 2177 if (to_write) { 2178 int rcw=0, must_compute=0; 2179 for (i=disks ; i--;) { 2180 dev = &sh->dev[i]; 2181 /* Would I have to read this buffer for reconstruct_write */ 2182 if (!test_bit(R5_OVERWRITE, &dev->flags) 2183 && i != pd_idx && i != qd_idx 2184 && (!test_bit(R5_LOCKED, &dev->flags) 2185#if 0 2186 || sh->bh_page[i] != bh->b_page 2187#endif 2188 ) && 2189 !test_bit(R5_UPTODATE, &dev->flags)) { 2190 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2191 else { 2192 PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags); 2193 must_compute++; 2194 } 2195 } 2196 } 2197 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n", 2198 (unsigned long long)sh->sector, rcw, must_compute); 2199 set_bit(STRIPE_HANDLE, &sh->state); 2200 2201 if (rcw > 0) 2202 /* want reconstruct write, but need to get some data */ 2203 for (i=disks; i--;) { 2204 dev = &sh->dev[i]; 2205 if (!test_bit(R5_OVERWRITE, &dev->flags) 2206 && !(failed == 0 && (i == pd_idx || i == qd_idx)) 2207 && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 2208 test_bit(R5_Insync, &dev->flags)) { 2209 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 2210 { 2211 PRINTK("Read_old stripe %llu block %d for Reconstruct\n", 2212 (unsigned long long)sh->sector, i); 2213 set_bit(R5_LOCKED, &dev->flags); 2214 set_bit(R5_Wantread, &dev->flags); 2215 locked++; 2216 } else { 2217 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n", 2218 (unsigned long long)sh->sector, i); 2219 set_bit(STRIPE_DELAYED, &sh->state); 2220 set_bit(STRIPE_HANDLE, &sh->state); 2221 } 2222 } 
2223 } 2224 /* now if nothing is locked, and if we have enough data, we can start a write request */ 2225 if (locked == 0 && rcw == 0 && 2226 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2227 if ( must_compute > 0 ) { 2228 /* We have failed blocks and need to compute them */ 2229 switch ( failed ) { 2230 case 0: BUG(); 2231 case 1: compute_block_1(sh, failed_num[0], 0); break; 2232 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; 2233 default: BUG(); /* This request should have been failed? */ 2234 } 2235 } 2236 2237 PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector); 2238 compute_parity6(sh, RECONSTRUCT_WRITE); 2239 /* now every locked buffer is ready to be written */ 2240 for (i=disks; i--;) 2241 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2242 PRINTK("Writing stripe %llu block %d\n", 2243 (unsigned long long)sh->sector, i); 2244 locked++; 2245 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2246 } 2247 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2248 set_bit(STRIPE_INSYNC, &sh->state); 2249 2250 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2251 atomic_dec(&conf->preread_active_stripes); 2252 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 2253 md_wakeup_thread(conf->mddev->thread); 2254 } 2255 } 2256 } 2257 2258 /* maybe we need to check and possibly fix the parity for this stripe 2259 * Any reads will already have been scheduled, so we just see if enough data 2260 * is available 2261 */ 2262 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { 2263 int update_p = 0, update_q = 0; 2264 struct r5dev *dev; 2265 2266 set_bit(STRIPE_HANDLE, &sh->state); 2267 2268 BUG_ON(failed>2); 2269 BUG_ON(uptodate < disks); 2270 /* Want to check and possibly repair P and Q. 2271 * However there could be one 'failed' device, in which 2272 * case we can only check one of them, possibly using the 2273 * other to generate missing data 2274 */ 2275 2276 /* If !tmp_page, we cannot do the calculations, 2277 * but as we have set STRIPE_HANDLE, we will soon be called 2278 * by stripe_handle with a tmp_page - just wait until then. 2279 */ 2280 if (tmp_page) { 2281 if (failed == q_failed) { 2282 /* The only possible failed device holds 'Q', so it makes 2283 * sense to check P (If anything else were failed, we would 2284 * have used P to recreate it). 2285 */ 2286 compute_block_1(sh, pd_idx, 1); 2287 if (!page_is_zero(sh->dev[pd_idx].page)) { 2288 compute_block_1(sh,pd_idx,0); 2289 update_p = 1; 2290 } 2291 } 2292 if (!q_failed && failed < 2) { 2293 /* q is not failed, and we didn't use it to generate 2294 * anything, so it makes sense to check it 2295 */ 2296 memcpy(page_address(tmp_page), 2297 page_address(sh->dev[qd_idx].page), 2298 STRIPE_SIZE); 2299 compute_parity6(sh, UPDATE_PARITY); 2300 if (memcmp(page_address(tmp_page), 2301 page_address(sh->dev[qd_idx].page), 2302 STRIPE_SIZE)!= 0) { 2303 clear_bit(STRIPE_INSYNC, &sh->state); 2304 update_q = 1; 2305 } 2306 } 2307 if (update_p || update_q) { 2308 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2309 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2310 /* don't try to repair!! 
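 * (a 'check' pass only counts mismatches in
 * resync_mismatches; a 'repair' pass would go on to
 * rewrite P and/or Q below)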
*/ 2311 update_p = update_q = 0; 2312 } 2313 2314 /* now write out any block on a failed drive, 2315 * or P or Q if they need it 2316 */ 2317 2318 if (failed == 2) { 2319 dev = &sh->dev[failed_num[1]]; 2320 locked++; 2321 set_bit(R5_LOCKED, &dev->flags); 2322 set_bit(R5_Wantwrite, &dev->flags); 2323 } 2324 if (failed >= 1) { 2325 dev = &sh->dev[failed_num[0]]; 2326 locked++; 2327 set_bit(R5_LOCKED, &dev->flags); 2328 set_bit(R5_Wantwrite, &dev->flags); 2329 } 2330 2331 if (update_p) { 2332 dev = &sh->dev[pd_idx]; 2333 locked ++; 2334 set_bit(R5_LOCKED, &dev->flags); 2335 set_bit(R5_Wantwrite, &dev->flags); 2336 } 2337 if (update_q) { 2338 dev = &sh->dev[qd_idx]; 2339 locked++; 2340 set_bit(R5_LOCKED, &dev->flags); 2341 set_bit(R5_Wantwrite, &dev->flags); 2342 } 2343 clear_bit(STRIPE_DEGRADED, &sh->state); 2344 2345 set_bit(STRIPE_INSYNC, &sh->state); 2346 } 2347 } 2348 2349 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2350 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2351 clear_bit(STRIPE_SYNCING, &sh->state); 2352 } 2353 2354 /* If the failed drives are just a ReadError, then we might need 2355 * to progress the repair/check process 2356 */ 2357 if (failed <= 2 && ! conf->mddev->ro) 2358 for (i=0; i<failed;i++) { 2359 dev = &sh->dev[failed_num[i]]; 2360 if (test_bit(R5_ReadError, &dev->flags) 2361 && !test_bit(R5_LOCKED, &dev->flags) 2362 && test_bit(R5_UPTODATE, &dev->flags) 2363 ) { 2364 if (!test_bit(R5_ReWrite, &dev->flags)) { 2365 set_bit(R5_Wantwrite, &dev->flags); 2366 set_bit(R5_ReWrite, &dev->flags); 2367 set_bit(R5_LOCKED, &dev->flags); 2368 } else { 2369 /* let's read it back */ 2370 set_bit(R5_Wantread, &dev->flags); 2371 set_bit(R5_LOCKED, &dev->flags); 2372 } 2373 } 2374 } 2375 spin_unlock(&sh->lock); 2376 2377 while ((bi=return_bi)) { 2378 int bytes = bi->bi_size; 2379 2380 return_bi = bi->bi_next; 2381 bi->bi_next = NULL; 2382 bi->bi_size = 0; 2383 bi->bi_end_io(bi, bytes, 0); 2384 } 2385 for (i=disks; i-- ;) { 2386 int rw; 2387 struct bio *bi; 2388 mdk_rdev_t *rdev; 2389 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 2390 rw = 1; 2391 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 2392 rw = 0; 2393 else 2394 continue; 2395 2396 bi = &sh->dev[i].req; 2397 2398 bi->bi_rw = rw; 2399 if (rw) 2400 bi->bi_end_io = raid5_end_write_request; 2401 else 2402 bi->bi_end_io = raid5_end_read_request; 2403 2404 rcu_read_lock(); 2405 rdev = rcu_dereference(conf->disks[i].rdev); 2406 if (rdev && test_bit(Faulty, &rdev->flags)) 2407 rdev = NULL; 2408 if (rdev) 2409 atomic_inc(&rdev->nr_pending); 2410 rcu_read_unlock(); 2411 2412 if (rdev) { 2413 if (syncing) 2414 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 2415 2416 bi->bi_bdev = rdev->bdev; 2417 PRINTK("for %llu schedule op %ld on disc %d\n", 2418 (unsigned long long)sh->sector, bi->bi_rw, i); 2419 atomic_inc(&sh->count); 2420 bi->bi_sector = sh->sector + rdev->data_offset; 2421 bi->bi_flags = 1 << BIO_UPTODATE; 2422 bi->bi_vcnt = 1; 2423 bi->bi_max_vecs = 1; 2424 bi->bi_idx = 0; 2425 bi->bi_io_vec = &sh->dev[i].vec; 2426 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 2427 bi->bi_io_vec[0].bv_offset = 0; 2428 bi->bi_size = STRIPE_SIZE; 2429 bi->bi_next = NULL; 2430 if (rw == WRITE && 2431 test_bit(R5_ReWrite, &sh->dev[i].flags)) 2432 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2433 generic_make_request(bi); 2434 } else { 2435 if (rw == 1) 2436 set_bit(STRIPE_DEGRADED, &sh->state); 2437 PRINTK("skip op %ld on disc %d for sector %llu\n", 2438 bi->bi_rw, i, (unsigned long long)sh->sector); 2439 
clear_bit(R5_LOCKED, &sh->dev[i].flags); 2440 set_bit(STRIPE_HANDLE, &sh->state); 2441 } 2442 } 2443} 2444 2445static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 2446{ 2447 if (sh->raid_conf->level == 6) 2448 handle_stripe6(sh, tmp_page); 2449 else 2450 handle_stripe5(sh); 2451} 2452 2453 2454 2455static void raid5_activate_delayed(raid5_conf_t *conf) 2456{ 2457 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 2458 while (!list_empty(&conf->delayed_list)) { 2459 struct list_head *l = conf->delayed_list.next; 2460 struct stripe_head *sh; 2461 sh = list_entry(l, struct stripe_head, lru); 2462 list_del_init(l); 2463 clear_bit(STRIPE_DELAYED, &sh->state); 2464 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 2465 atomic_inc(&conf->preread_active_stripes); 2466 list_add_tail(&sh->lru, &conf->handle_list); 2467 } 2468 } 2469} 2470 2471static void activate_bit_delay(raid5_conf_t *conf) 2472{ 2473 /* device_lock is held */ 2474 struct list_head head; 2475 list_add(&head, &conf->bitmap_list); 2476 list_del_init(&conf->bitmap_list); 2477 while (!list_empty(&head)) { 2478 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 2479 list_del_init(&sh->lru); 2480 atomic_inc(&sh->count); 2481 __release_stripe(conf, sh); 2482 } 2483} 2484 2485static void unplug_slaves(mddev_t *mddev) 2486{ 2487 raid5_conf_t *conf = mddev_to_conf(mddev); 2488 int i; 2489 2490 rcu_read_lock(); 2491 for (i=0; i<mddev->raid_disks; i++) { 2492 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 2493 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 2494 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 2495 2496 atomic_inc(&rdev->nr_pending); 2497 rcu_read_unlock(); 2498 2499 if (r_queue->unplug_fn) 2500 r_queue->unplug_fn(r_queue); 2501 2502 rdev_dec_pending(rdev, mddev); 2503 rcu_read_lock(); 2504 } 2505 } 2506 rcu_read_unlock(); 2507} 2508 2509static void raid5_unplug_device(request_queue_t *q) 2510{ 2511 mddev_t *mddev = q->queuedata; 2512 raid5_conf_t *conf = mddev_to_conf(mddev); 2513 unsigned long flags; 2514 2515 spin_lock_irqsave(&conf->device_lock, flags); 2516 2517 if (blk_remove_plug(q)) { 2518 conf->seq_flush++; 2519 raid5_activate_delayed(conf); 2520 } 2521 md_wakeup_thread(mddev->thread); 2522 2523 spin_unlock_irqrestore(&conf->device_lock, flags); 2524 2525 unplug_slaves(mddev); 2526} 2527 2528static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 2529 sector_t *error_sector) 2530{ 2531 mddev_t *mddev = q->queuedata; 2532 raid5_conf_t *conf = mddev_to_conf(mddev); 2533 int i, ret = 0; 2534 2535 rcu_read_lock(); 2536 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 2537 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 2538 if (rdev && !test_bit(Faulty, &rdev->flags)) { 2539 struct block_device *bdev = rdev->bdev; 2540 request_queue_t *r_queue = bdev_get_queue(bdev); 2541 2542 if (!r_queue->issue_flush_fn) 2543 ret = -EOPNOTSUPP; 2544 else { 2545 atomic_inc(&rdev->nr_pending); 2546 rcu_read_unlock(); 2547 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 2548 error_sector); 2549 rdev_dec_pending(rdev, mddev); 2550 rcu_read_lock(); 2551 } 2552 } 2553 } 2554 rcu_read_unlock(); 2555 return ret; 2556} 2557 2558static inline void raid5_plug_device(raid5_conf_t *conf) 2559{ 2560 spin_lock_irq(&conf->device_lock); 2561 blk_plug_device(conf->mddev->queue); 2562 spin_unlock_irq(&conf->device_lock); 2563} 2564 2565static int make_request(request_queue_t *q, struct bio * bi) 2566{ 2567 
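	/* Split the bio into STRIPE_SECTORS-sized pieces, map each piece
	 * to its (stripe, device) pair, attach it with add_stripe_bio()
	 * and let handle_stripe() do the real work; bi_phys_segments
	 * doubles as a reference count so the bio is completed only
	 * after every stripe covering it has been handled.
	 */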
mddev_t *mddev = q->queuedata; 2568 raid5_conf_t *conf = mddev_to_conf(mddev); 2569 unsigned int dd_idx, pd_idx; 2570 sector_t new_sector; 2571 sector_t logical_sector, last_sector; 2572 struct stripe_head *sh; 2573 const int rw = bio_data_dir(bi); 2574 int remaining; 2575 2576 if (unlikely(bio_barrier(bi))) { 2577 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 2578 return 0; 2579 } 2580 2581 md_write_start(mddev, bi); 2582 2583 disk_stat_inc(mddev->gendisk, ios[rw]); 2584 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 2585 2586 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 2587 last_sector = bi->bi_sector + (bi->bi_size>>9); 2588 bi->bi_next = NULL; 2589 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 2590 2591 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 2592 DEFINE_WAIT(w); 2593 int disks, data_disks; 2594 2595 retry: 2596 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 2597 if (likely(conf->expand_progress == MaxSector)) 2598 disks = conf->raid_disks; 2599 else { 2600 /* spinlock is needed as expand_progress may be 2601 * 64bit on a 32bit platform, and so it might be 2602 * possible to see a half-updated value. 2603 * Of course expand_progress could change after 2604 * the lock is dropped, so once we get a reference 2605 * to the stripe that we think it is, we will have 2606 * to check again. 2607 */ 2608 spin_lock_irq(&conf->device_lock); 2609 disks = conf->raid_disks; 2610 if (logical_sector >= conf->expand_progress) 2611 disks = conf->previous_raid_disks; 2612 else { 2613 if (logical_sector >= conf->expand_lo) { 2614 spin_unlock_irq(&conf->device_lock); 2615 schedule(); 2616 goto retry; 2617 } 2618 } 2619 spin_unlock_irq(&conf->device_lock); 2620 } 2621 data_disks = disks - conf->max_degraded; 2622 2623 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 2624 &dd_idx, &pd_idx, conf); 2625 PRINTK("raid5: make_request, sector %llu logical %llu\n", 2626 (unsigned long long)new_sector, 2627 (unsigned long long)logical_sector); 2628 2629 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 2630 if (sh) { 2631 if (unlikely(conf->expand_progress != MaxSector)) { 2632 /* expansion might have moved on while waiting for a 2633 * stripe, so we must do the range check again. 2634 * Expansion could still move past after this 2635 * test, but as we are holding a reference to 2636 * 'sh', we know that if that happens, 2637 * STRIPE_EXPANDING will get set and the expansion 2638 * won't proceed until we finish with the stripe. 2639 */ 2640 int must_retry = 0; 2641 spin_lock_irq(&conf->device_lock); 2642 if (logical_sector < conf->expand_progress && 2643 disks == conf->previous_raid_disks) 2644 /* mismatch, need to try again */ 2645 must_retry = 1; 2646 spin_unlock_irq(&conf->device_lock); 2647 if (must_retry) { 2648 release_stripe(sh); 2649 goto retry; 2650 } 2651 } 2652 /* FIXME what if we get a false positive because these 2653 * are being updated. 2654 */ 2655 if (logical_sector >= mddev->suspend_lo && 2656 logical_sector < mddev->suspend_hi) { 2657 release_stripe(sh); 2658 schedule(); 2659 goto retry; 2660 } 2661 2662 if (test_bit(STRIPE_EXPANDING, &sh->state) || 2663 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 2664 /* Stripe is busy expanding or 2665 * add failed due to overlap.
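 * (add_stripe_bio() fails when the new bio would overlap a bio
 * already queued for this stripe+device; R5_Overlap marks the
 * waiter and conf->wait_for_overlap is used, via the
 * prepare_to_wait() above, to retry once the conflicting bio
 * completes.)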
Flush everything 2666 * and wait a while 2667 */ 2668 raid5_unplug_device(mddev->queue); 2669 release_stripe(sh); 2670 schedule(); 2671 goto retry; 2672 } 2673 finish_wait(&conf->wait_for_overlap, &w); 2674 raid5_plug_device(conf); 2675 handle_stripe(sh, NULL); 2676 release_stripe(sh); 2677 } else { 2678 /* cannot get stripe for read-ahead, just give up */ 2679 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2680 finish_wait(&conf->wait_for_overlap, &w); 2681 break; 2682 } 2683 2684 } 2685 spin_lock_irq(&conf->device_lock); 2686 remaining = --bi->bi_phys_segments; 2687 spin_unlock_irq(&conf->device_lock); 2688 if (remaining == 0) { 2689 int bytes = bi->bi_size; 2690 2691 if ( rw == WRITE ) 2692 md_write_end(mddev); 2693 bi->bi_size = 0; 2694 bi->bi_end_io(bi, bytes, 0); 2695 } 2696 return 0; 2697} 2698 2699/* FIXME go_faster isn't used */ 2700static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 2701{ 2702 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 2703 struct stripe_head *sh; 2704 int pd_idx; 2705 sector_t first_sector, last_sector; 2706 int raid_disks = conf->raid_disks; 2707 int data_disks = raid_disks - conf->max_degraded; 2708 sector_t max_sector = mddev->size << 1; 2709 int sync_blocks; 2710 int still_degraded = 0; 2711 int i; 2712 2713 if (sector_nr >= max_sector) { 2714 /* just being told to finish up .. nothing much to do */ 2715 unplug_slaves(mddev); 2716 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2717 end_reshape(conf); 2718 return 0; 2719 } 2720 2721 if (mddev->curr_resync < max_sector) /* aborted */ 2722 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2723 &sync_blocks, 1); 2724 else /* completed sync */ 2725 conf->fullsync = 0; 2726 bitmap_close_sync(mddev->bitmap); 2727 2728 return 0; 2729 } 2730 2731 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2732 /* reshaping is quite different to recovery/resync so it is 2733 * handled quite separately ... here. 2734 * 2735 * On each call to sync_request, we gather one chunk worth of 2736 * destination stripes and flag them as expanding. 2737 * Then we find all the source stripes and request reads. 2738 * As the reads complete, handle_stripe will copy the data 2739 * into the destination stripe and release that stripe. 2740 */ 2741 int i; 2742 int dd_idx; 2743 sector_t writepos, safepos, gap; 2744 2745 if (sector_nr == 0 && 2746 conf->expand_progress != 0) { 2747 /* restarting in the middle, skip the initial sectors */ 2748 sector_nr = conf->expand_progress; 2749 sector_div(sector_nr, conf->raid_disks-1); 2750 *skipped = 1; 2751 return sector_nr; 2752 } 2753 2754 /* we update the metadata when there is more than 3Meg 2755 * in the block range (that is rather arbitrary, should 2756 * probably be time based) or when the data about to be 2757 * copied would over-write the source of the data at 2758 * the front of the range. 2759 * i.e. when the place one new stripe beyond expand_progress 2760 * maps to in the new layout is past where expand_lo maps to in the old layout. 2761 */ 2762 writepos = conf->expand_progress + 2763 conf->chunk_size/512*(conf->raid_disks-1); 2764 sector_div(writepos, conf->raid_disks-1); 2765 safepos = conf->expand_lo; 2766 sector_div(safepos, conf->previous_raid_disks-1); 2767 gap = conf->expand_progress - conf->expand_lo; 2768 2769 if (writepos >= safepos || 2770 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) { 2771 /* Cannot proceed until we've updated the superblock...
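 * (For example, with 64k chunks and raid_disks == 5: writepos
 * starts 128*4 = 512 array sectors past expand_progress before
 * sector_div() reduces it to per-device sectors, and the gap
 * test fires once expand_progress - expand_lo exceeds
 * 4*3000*2 = 24000 array sectors, the "3Meg" of per-device
 * progress mentioned above.)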
*/ 2772 wait_event(conf->wait_for_overlap, 2773 atomic_read(&conf->reshape_stripes)==0); 2774 mddev->reshape_position = conf->expand_progress; 2775 mddev->sb_dirty = 1; 2776 md_wakeup_thread(mddev->thread); 2777 wait_event(mddev->sb_wait, mddev->sb_dirty == 0 || 2778 kthread_should_stop()); 2779 spin_lock_irq(&conf->device_lock); 2780 conf->expand_lo = mddev->reshape_position; 2781 spin_unlock_irq(&conf->device_lock); 2782 wake_up(&conf->wait_for_overlap); 2783 } 2784 2785 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { 2786 int j; 2787 int skipped = 0; 2788 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); 2789 sh = get_active_stripe(conf, sector_nr+i, 2790 conf->raid_disks, pd_idx, 0); 2791 set_bit(STRIPE_EXPANDING, &sh->state); 2792 atomic_inc(&conf->reshape_stripes); 2793 /* If any of this stripe is beyond the end of the old 2794 * array, then we need to zero those blocks 2795 */ 2796 for (j=sh->disks; j--;) { 2797 sector_t s; 2798 if (j == sh->pd_idx) 2799 continue; 2800 s = compute_blocknr(sh, j); 2801 if (s < (mddev->array_size<<1)) { 2802 skipped = 1; 2803 continue; 2804 } 2805 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 2806 set_bit(R5_Expanded, &sh->dev[j].flags); 2807 set_bit(R5_UPTODATE, &sh->dev[j].flags); 2808 } 2809 if (!skipped) { 2810 set_bit(STRIPE_EXPAND_READY, &sh->state); 2811 set_bit(STRIPE_HANDLE, &sh->state); 2812 } 2813 release_stripe(sh); 2814 } 2815 spin_lock_irq(&conf->device_lock); 2816 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1); 2817 spin_unlock_irq(&conf->device_lock); 2818 /* Ok, those stripes are ready. We can start scheduling 2819 * reads on the source stripes. 2820 * The source stripes are determined by mapping the first and last 2821 * block on the destination stripes. 2822 */ 2823 raid_disks = conf->previous_raid_disks; 2824 data_disks = raid_disks - 1; 2825 first_sector = 2826 raid5_compute_sector(sector_nr*(conf->raid_disks-1), 2827 raid_disks, data_disks, 2828 &dd_idx, &pd_idx, conf); 2829 last_sector = 2830 raid5_compute_sector((sector_nr+conf->chunk_size/512) 2831 *(conf->raid_disks-1) -1, 2832 raid_disks, data_disks, 2833 &dd_idx, &pd_idx, conf); 2834 if (last_sector >= (mddev->size<<1)) 2835 last_sector = (mddev->size<<1)-1; 2836 while (first_sector <= last_sector) { 2837 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks); 2838 sh = get_active_stripe(conf, first_sector, 2839 conf->previous_raid_disks, pd_idx, 0); 2840 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2841 set_bit(STRIPE_HANDLE, &sh->state); 2842 release_stripe(sh); 2843 first_sector += STRIPE_SECTORS; 2844 } 2845 return conf->chunk_size>>9; 2846 } 2847 /* if there are too many failed drives and we are trying 2848 * to resync, then assert that we are finished, because there is 2849 * nothing we can do.
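 * (raid_disks - data_disks here equals conf->max_degraded; with
 * that many devices already missing, a resync, as opposed to a
 * recovery, has no redundancy left to verify, so the remaining
 * range is simply reported as done)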
2850 */ 2851 if (mddev->degraded >= (raid_disks - data_disks) && 2852 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2853 sector_t rv = (mddev->size << 1) - sector_nr; 2854 *skipped = 1; 2855 return rv; 2856 } 2857 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 2858 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2859 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 2860 /* we can skip this block, and probably more */ 2861 sync_blocks /= STRIPE_SECTORS; 2862 *skipped = 1; 2863 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 2864 } 2865 2866 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); 2867 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); 2868 if (sh == NULL) { 2869 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 2870 /* make sure we don't swamp the stripe cache if someone else 2871 * is trying to get access 2872 */ 2873 schedule_timeout_uninterruptible(1); 2874 } 2875 /* Need to check if array will still be degraded after recovery/resync 2876 * We don't need to check the 'failed' flag as when that gets set, 2877 * recovery aborts. 2878 */ 2879 for (i=0; i<mddev->raid_disks; i++) 2880 if (conf->disks[i].rdev == NULL) 2881 still_degraded = 1; 2882 2883 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 2884 2885 spin_lock(&sh->lock); 2886 set_bit(STRIPE_SYNCING, &sh->state); 2887 clear_bit(STRIPE_INSYNC, &sh->state); 2888 spin_unlock(&sh->lock); 2889 2890 handle_stripe(sh, NULL); 2891 release_stripe(sh); 2892 2893 return STRIPE_SECTORS; 2894} 2895 2896/* 2897 * This is our raid5 kernel thread. 2898 * 2899 * We scan the hash table for stripes which can be handled now. 2900 * During the scan, completed stripes are saved for us by the interrupt 2901 * handler, so that they will not have to wait for our next wakeup.
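 * Stripes are taken from conf->handle_list under device_lock;
 * raid5_activate_delayed() and activate_bit_delay() promote
 * delayed and bitmap-delayed stripes back onto that list once
 * pre-reading may start or the bitmap has been flushed.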
2902 */ 2903static void raid5d (mddev_t *mddev) 2904{ 2905 struct stripe_head *sh; 2906 raid5_conf_t *conf = mddev_to_conf(mddev); 2907 int handled; 2908 2909 PRINTK("+++ raid5d active\n"); 2910 2911 md_check_recovery(mddev); 2912 2913 handled = 0; 2914 spin_lock_irq(&conf->device_lock); 2915 while (1) { 2916 struct list_head *first; 2917 2918 if (conf->seq_flush - conf->seq_write > 0) { 2919 int seq = conf->seq_flush; 2920 spin_unlock_irq(&conf->device_lock); 2921 bitmap_unplug(mddev->bitmap); 2922 spin_lock_irq(&conf->device_lock); 2923 conf->seq_write = seq; 2924 activate_bit_delay(conf); 2925 } 2926 2927 if (list_empty(&conf->handle_list) && 2928 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 2929 !blk_queue_plugged(mddev->queue) && 2930 !list_empty(&conf->delayed_list)) 2931 raid5_activate_delayed(conf); 2932 2933 if (list_empty(&conf->handle_list)) 2934 break; 2935 2936 first = conf->handle_list.next; 2937 sh = list_entry(first, struct stripe_head, lru); 2938 2939 list_del_init(first); 2940 atomic_inc(&sh->count); 2941 BUG_ON(atomic_read(&sh->count)!= 1); 2942 spin_unlock_irq(&conf->device_lock); 2943 2944 handled++; 2945 handle_stripe(sh, conf->spare_page); 2946 release_stripe(sh); 2947 2948 spin_lock_irq(&conf->device_lock); 2949 } 2950 PRINTK("%d stripes handled\n", handled); 2951 2952 spin_unlock_irq(&conf->device_lock); 2953 2954 unplug_slaves(mddev); 2955 2956 PRINTK("--- raid5d inactive\n"); 2957} 2958 2959static ssize_t 2960raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 2961{ 2962 raid5_conf_t *conf = mddev_to_conf(mddev); 2963 if (conf) 2964 return sprintf(page, "%d\n", conf->max_nr_stripes); 2965 else 2966 return 0; 2967} 2968 2969static ssize_t 2970raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 2971{ 2972 raid5_conf_t *conf = mddev_to_conf(mddev); 2973 char *end; 2974 int new; 2975 if (len >= PAGE_SIZE) 2976 return -EINVAL; 2977 if (!conf) 2978 return -ENODEV; 2979 2980 new = simple_strtoul(page, &end, 10); 2981 if (!*page || (*end && *end != '\n') ) 2982 return -EINVAL; 2983 if (new <= 16 || new > 32768) 2984 return -EINVAL; 2985 while (new < conf->max_nr_stripes) { 2986 if (drop_one_stripe(conf)) 2987 conf->max_nr_stripes--; 2988 else 2989 break; 2990 } 2991 while (new > conf->max_nr_stripes) { 2992 if (grow_one_stripe(conf)) 2993 conf->max_nr_stripes++; 2994 else break; 2995 } 2996 return len; 2997} 2998 2999static struct md_sysfs_entry 3000raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 3001 raid5_show_stripe_cache_size, 3002 raid5_store_stripe_cache_size); 3003 3004static ssize_t 3005stripe_cache_active_show(mddev_t *mddev, char *page) 3006{ 3007 raid5_conf_t *conf = mddev_to_conf(mddev); 3008 if (conf) 3009 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 3010 else 3011 return 0; 3012} 3013 3014static struct md_sysfs_entry 3015raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 3016 3017static struct attribute *raid5_attrs[] = { 3018 &raid5_stripecache_size.attr, 3019 &raid5_stripecache_active.attr, 3020 NULL, 3021}; 3022static struct attribute_group raid5_attrs_group = { 3023 .name = NULL, 3024 .attrs = raid5_attrs, 3025}; 3026 3027static int run(mddev_t *mddev) 3028{ 3029 raid5_conf_t *conf; 3030 int raid_disk, memory; 3031 mdk_rdev_t *rdev; 3032 struct disk_info *disk; 3033 struct list_head *tmp; 3034 3035 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 3036 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 3037 mdname(mddev), 
mddev->level); 3038 return -EIO; 3039 } 3040 3041 if (mddev->reshape_position != MaxSector) { 3042 /* Check that we can continue the reshape. 3043 * Currently only disks can change, it must 3044 * increase, and we must be past the point where 3045 * a stripe over-writes itself 3046 */ 3047 sector_t here_new, here_old; 3048 int old_disks; 3049 3050 if (mddev->new_level != mddev->level || 3051 mddev->new_layout != mddev->layout || 3052 mddev->new_chunk != mddev->chunk_size) { 3053 printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n", 3054 mdname(mddev)); 3055 return -EINVAL; 3056 } 3057 if (mddev->delta_disks <= 0) { 3058 printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n", 3059 mdname(mddev)); 3060 return -EINVAL; 3061 } 3062 old_disks = mddev->raid_disks - mddev->delta_disks; 3063 /* reshape_position must be on a new-stripe boundary, and one 3064 * further up in new geometry must map after here in old geometry. 3065 */ 3066 here_new = mddev->reshape_position; 3067 if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) { 3068 printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n"); 3069 return -EINVAL; 3070 } 3071 /* here_new is the stripe we will write to */ 3072 here_old = mddev->reshape_position; 3073 sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1)); 3074 /* here_old is the first stripe that we might need to read from */ 3075 if (here_new >= here_old) { 3076 /* Reading from the same stripe as writing to - bad */ 3077 printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n"); 3078 return -EINVAL; 3079 } 3080 printk(KERN_INFO "raid5: reshape will continue\n"); 3081 /* OK, we should be able to continue; */ 3082 } 3083 3084 3085 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 3086 if ((conf = mddev->private) == NULL) 3087 goto abort; 3088 if (mddev->reshape_position == MaxSector) { 3089 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 3090 } else { 3091 conf->raid_disks = mddev->raid_disks; 3092 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 3093 } 3094 3095 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 3096 GFP_KERNEL); 3097 if (!conf->disks) 3098 goto abort; 3099 3100 conf->mddev = mddev; 3101 3102 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 3103 goto abort; 3104 3105 if (mddev->level == 6) { 3106 conf->spare_page = alloc_page(GFP_KERNEL); 3107 if (!conf->spare_page) 3108 goto abort; 3109 } 3110 spin_lock_init(&conf->device_lock); 3111 init_waitqueue_head(&conf->wait_for_stripe); 3112 init_waitqueue_head(&conf->wait_for_overlap); 3113 INIT_LIST_HEAD(&conf->handle_list); 3114 INIT_LIST_HEAD(&conf->delayed_list); 3115 INIT_LIST_HEAD(&conf->bitmap_list); 3116 INIT_LIST_HEAD(&conf->inactive_list); 3117 atomic_set(&conf->active_stripes, 0); 3118 atomic_set(&conf->preread_active_stripes, 0); 3119 3120 PRINTK("raid5: run(%s) called.\n", mdname(mddev)); 3121 3122 ITERATE_RDEV(mddev,rdev,tmp) { 3123 raid_disk = rdev->raid_disk; 3124 if (raid_disk >= conf->raid_disks 3125 || raid_disk < 0) 3126 continue; 3127 disk = conf->disks + raid_disk; 3128 3129 disk->rdev = rdev; 3130 3131 if (test_bit(In_sync, &rdev->flags)) { 3132 char b[BDEVNAME_SIZE]; 3133 printk(KERN_INFO "raid5: device %s operational as raid" 3134 " disk %d\n", bdevname(rdev->bdev,b), 3135 raid_disk); 3136 conf->working_disks++; 3137 } 3138 } 3139 3140 /* 3141 * 0 for a fully functional array, 1 or 2 for a degraded array. 
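 * (2 is only possible for raid6, where max_degraded is 2;
 * anything above conf->max_degraded is rejected a little
 * further down)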
3142 */ 3143 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; 3144 conf->mddev = mddev; 3145 conf->chunk_size = mddev->chunk_size; 3146 conf->level = mddev->level; 3147 if (conf->level == 6) 3148 conf->max_degraded = 2; 3149 else 3150 conf->max_degraded = 1; 3151 conf->algorithm = mddev->layout; 3152 conf->max_nr_stripes = NR_STRIPES; 3153 conf->expand_progress = mddev->reshape_position; 3154 3155 /* device size must be a multiple of chunk size */ 3156 mddev->size &= ~(mddev->chunk_size/1024 -1); 3157 mddev->resync_max_sectors = mddev->size << 1; 3158 3159 if (conf->level == 6 && conf->raid_disks < 4) { 3160 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 3161 mdname(mddev), conf->raid_disks); 3162 goto abort; 3163 } 3164 if (!conf->chunk_size || conf->chunk_size % 4) { 3165 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 3166 conf->chunk_size, mdname(mddev)); 3167 goto abort; 3168 } 3169 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 3170 printk(KERN_ERR 3171 "raid5: unsupported parity algorithm %d for %s\n", 3172 conf->algorithm, mdname(mddev)); 3173 goto abort; 3174 } 3175 if (mddev->degraded > conf->max_degraded) { 3176 printk(KERN_ERR "raid5: not enough operational devices for %s" 3177 " (%d/%d failed)\n", 3178 mdname(mddev), conf->failed_disks, conf->raid_disks); 3179 goto abort; 3180 } 3181 3182 if (mddev->degraded > 0 && 3183 mddev->recovery_cp != MaxSector) { 3184 if (mddev->ok_start_degraded) 3185 printk(KERN_WARNING 3186 "raid5: starting dirty degraded array: %s" 3187 " - data corruption possible.\n", 3188 mdname(mddev)); 3189 else { 3190 printk(KERN_ERR 3191 "raid5: cannot start dirty degraded array for %s\n", 3192 mdname(mddev)); 3193 goto abort; 3194 } 3195 } 3196 3197 { 3198 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 3199 if (!mddev->thread) { 3200 printk(KERN_ERR 3201 "raid5: couldn't allocate thread for %s\n", 3202 mdname(mddev)); 3203 goto abort; 3204 } 3205 } 3206 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 3207 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 3208 if (grow_stripes(conf, conf->max_nr_stripes)) { 3209 printk(KERN_ERR 3210 "raid5: couldn't allocate %dkB for buffers\n", memory); 3211 shrink_stripes(conf); 3212 md_unregister_thread(mddev->thread); 3213 goto abort; 3214 } else 3215 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 3216 memory, mdname(mddev)); 3217 3218 if (mddev->degraded == 0) 3219 printk("raid5: raid level %d set %s active with %d out of %d" 3220 " devices, algorithm %d\n", conf->level, mdname(mddev), 3221 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 3222 conf->algorithm); 3223 else 3224 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 3225 " out of %d devices, algorithm %d\n", conf->level, 3226 mdname(mddev), mddev->raid_disks - mddev->degraded, 3227 mddev->raid_disks, conf->algorithm); 3228 3229 print_raid5_conf(conf); 3230 3231 if (conf->expand_progress != MaxSector) { 3232 printk("...ok start reshape thread\n"); 3233 conf->expand_lo = conf->expand_progress; 3234 atomic_set(&conf->reshape_stripes, 0); 3235 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3236 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3237 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3238 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3239 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3240 "%s_reshape"); 3241 /* FIXME if md_register_thread fails??
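 * (if registration did fail, sync_thread would be NULL,
 * the wakeup below a no-op, and the reshape would simply
 * never resume)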
*/ 3242 md_wakeup_thread(mddev->sync_thread); 3243 3244 } 3245 3246 /* read-ahead size must cover two whole stripes, which is 3247 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data (non-parity) devices 3248 */ 3249 { 3250 int data_disks = conf->previous_raid_disks - conf->max_degraded; 3251 int stripe = data_disks * 3252 (mddev->chunk_size / PAGE_SIZE); 3253 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3254 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3255 } 3256 3257 /* Ok, everything is just fine now */ 3258 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 3259 3260 mddev->queue->unplug_fn = raid5_unplug_device; 3261 mddev->queue->issue_flush_fn = raid5_issue_flush; 3262 mddev->array_size = mddev->size * (conf->previous_raid_disks - 3263 conf->max_degraded); 3264 3265 return 0; 3266abort: 3267 if (conf) { 3268 print_raid5_conf(conf); 3269 safe_put_page(conf->spare_page); 3270 kfree(conf->disks); 3271 kfree(conf->stripe_hashtbl); 3272 kfree(conf); 3273 } 3274 mddev->private = NULL; 3275 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 3276 return -EIO; 3277} 3278 3279 3280 3281static int stop(mddev_t *mddev) 3282{ 3283 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3284 3285 md_unregister_thread(mddev->thread); 3286 mddev->thread = NULL; 3287 shrink_stripes(conf); 3288 kfree(conf->stripe_hashtbl); 3289 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 3290 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 3291 kfree(conf->disks); 3292 kfree(conf); 3293 mddev->private = NULL; 3294 return 0; 3295} 3296 3297#if RAID5_DEBUG 3298static void print_sh (struct seq_file *seq, struct stripe_head *sh) 3299{ 3300 int i; 3301 3302 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 3303 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 3304 seq_printf(seq, "sh %llu, count %d.\n", 3305 (unsigned long long)sh->sector, atomic_read(&sh->count)); 3306 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 3307 for (i = 0; i < sh->disks; i++) { 3308 seq_printf(seq, "(cache%d: %p %ld) ", 3309 i, sh->dev[i].page, sh->dev[i].flags); 3310 } 3311 seq_printf(seq, "\n"); 3312} 3313 3314static void printall (struct seq_file *seq, raid5_conf_t *conf) 3315{ 3316 struct stripe_head *sh; 3317 struct hlist_node *hn; 3318 int i; 3319 3320 spin_lock_irq(&conf->device_lock); 3321 for (i = 0; i < NR_HASH; i++) { 3322 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 3323 if (sh->raid_conf != conf) 3324 continue; 3325 print_sh(seq, sh); 3326 } 3327 } 3328 spin_unlock_irq(&conf->device_lock); 3329} 3330#endif 3331 3332static void status (struct seq_file *seq, mddev_t *mddev) 3333{ 3334 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3335 int i; 3336 3337 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 3338 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks); 3339 for (i = 0; i < conf->raid_disks; i++) 3340 seq_printf (seq, "%s", 3341 conf->disks[i].rdev && 3342 test_bit(In_sync, &conf->disks[i].rdev->flags) ?
"U" : "_"); 3343 seq_printf (seq, "]"); 3344#if RAID5_DEBUG 3345 seq_printf (seq, "\n"); 3346 printall(seq, conf); 3347#endif 3348} 3349 3350static void print_raid5_conf (raid5_conf_t *conf) 3351{ 3352 int i; 3353 struct disk_info *tmp; 3354 3355 printk("RAID5 conf printout:\n"); 3356 if (!conf) { 3357 printk("(conf==NULL)\n"); 3358 return; 3359 } 3360 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks, 3361 conf->working_disks, conf->failed_disks); 3362 3363 for (i = 0; i < conf->raid_disks; i++) { 3364 char b[BDEVNAME_SIZE]; 3365 tmp = conf->disks + i; 3366 if (tmp->rdev) 3367 printk(" disk %d, o:%d, dev:%s\n", 3368 i, !test_bit(Faulty, &tmp->rdev->flags), 3369 bdevname(tmp->rdev->bdev,b)); 3370 } 3371} 3372 3373static int raid5_spare_active(mddev_t *mddev) 3374{ 3375 int i; 3376 raid5_conf_t *conf = mddev->private; 3377 struct disk_info *tmp; 3378 3379 for (i = 0; i < conf->raid_disks; i++) { 3380 tmp = conf->disks + i; 3381 if (tmp->rdev 3382 && !test_bit(Faulty, &tmp->rdev->flags) 3383 && !test_bit(In_sync, &tmp->rdev->flags)) { 3384 mddev->degraded--; 3385 conf->failed_disks--; 3386 conf->working_disks++; 3387 set_bit(In_sync, &tmp->rdev->flags); 3388 } 3389 } 3390 print_raid5_conf(conf); 3391 return 0; 3392} 3393 3394static int raid5_remove_disk(mddev_t *mddev, int number) 3395{ 3396 raid5_conf_t *conf = mddev->private; 3397 int err = 0; 3398 mdk_rdev_t *rdev; 3399 struct disk_info *p = conf->disks + number; 3400 3401 print_raid5_conf(conf); 3402 rdev = p->rdev; 3403 if (rdev) { 3404 if (test_bit(In_sync, &rdev->flags) || 3405 atomic_read(&rdev->nr_pending)) { 3406 err = -EBUSY; 3407 goto abort; 3408 } 3409 p->rdev = NULL; 3410 synchronize_rcu(); 3411 if (atomic_read(&rdev->nr_pending)) { 3412 /* lost the race, try later */ 3413 err = -EBUSY; 3414 p->rdev = rdev; 3415 } 3416 } 3417abort: 3418 3419 print_raid5_conf(conf); 3420 return err; 3421} 3422 3423static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 3424{ 3425 raid5_conf_t *conf = mddev->private; 3426 int found = 0; 3427 int disk; 3428 struct disk_info *p; 3429 3430 if (mddev->degraded > conf->max_degraded) 3431 /* no point adding a device */ 3432 return 0; 3433 3434 /* 3435 * find the disk ... but prefer rdev->saved_raid_disk 3436 * if possible. 3437 */ 3438 if (rdev->saved_raid_disk >= 0 && 3439 conf->disks[rdev->saved_raid_disk].rdev == NULL) 3440 disk = rdev->saved_raid_disk; 3441 else 3442 disk = 0; 3443 for ( ; disk < conf->raid_disks; disk++) 3444 if ((p=conf->disks + disk)->rdev == NULL) { 3445 clear_bit(In_sync, &rdev->flags); 3446 rdev->raid_disk = disk; 3447 found = 1; 3448 if (rdev->saved_raid_disk != disk) 3449 conf->fullsync = 1; 3450 rcu_assign_pointer(p->rdev, rdev); 3451 break; 3452 } 3453 print_raid5_conf(conf); 3454 return found; 3455} 3456 3457static int raid5_resize(mddev_t *mddev, sector_t sectors) 3458{ 3459 /* no resync is happening, and there is enough space 3460 * on all devices, so we can resize. 3461 * We need to make sure resync covers any new space. 3462 * If the array is shrinking we should possibly wait until 3463 * any io in the removed space completes, but it hardly seems 3464 * worth it. 
3465 */ 3466 raid5_conf_t *conf = mddev_to_conf(mddev); 3467 3468 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 3469 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 3470 set_capacity(mddev->gendisk, mddev->array_size << 1); 3471 mddev->changed = 1; 3472 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 3473 mddev->recovery_cp = mddev->size << 1; 3474 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3475 } 3476 mddev->size = sectors /2; 3477 mddev->resync_max_sectors = sectors; 3478 return 0; 3479} 3480 3481#ifdef CONFIG_MD_RAID5_RESHAPE 3482static int raid5_check_reshape(mddev_t *mddev) 3483{ 3484 raid5_conf_t *conf = mddev_to_conf(mddev); 3485 int err; 3486 3487 if (mddev->delta_disks < 0 || 3488 mddev->new_level != mddev->level) 3489 return -EINVAL; /* Cannot shrink array or change level yet */ 3490 if (mddev->delta_disks == 0) 3491 return 0; /* nothing to do */ 3492 3493 /* Can only proceed if there are plenty of stripe_heads. 3494 * We need a minimum of one full stripe, and for sensible progress 3495 * it is best to have about 4 times that. 3496 * If we require 4 times, then the default 256 4K stripe_heads will 3497 * allow for chunk sizes up to 256K, which is probably OK. 3498 * If the chunk size is greater, user-space should request more 3499 * stripe_heads first. 3500 */ 3501 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 3502 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 3503 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 3504 (mddev->chunk_size / STRIPE_SIZE)*4); 3505 return -ENOSPC; 3506 } 3507 3508 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 3509 if (err) 3510 return err; 3511 3512 /* looks like we might be able to manage this */ 3513 return 0; 3514} 3515 3516static int raid5_start_reshape(mddev_t *mddev) 3517{ 3518 raid5_conf_t *conf = mddev_to_conf(mddev); 3519 mdk_rdev_t *rdev; 3520 struct list_head *rtmp; 3521 int spares = 0; 3522 int added_devices = 0; 3523 3524 if (mddev->degraded || 3525 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3526 return -EBUSY; 3527 3528 ITERATE_RDEV(mddev, rdev, rtmp) 3529 if (rdev->raid_disk < 0 && 3530 !test_bit(Faulty, &rdev->flags)) 3531 spares++; 3532 3533 if (spares < mddev->delta_disks-1) 3534 /* Not enough devices even to make a degraded array 3535 * of that size 3536 */ 3537 return -EINVAL; 3538 3539 atomic_set(&conf->reshape_stripes, 0); 3540 spin_lock_irq(&conf->device_lock); 3541 conf->previous_raid_disks = conf->raid_disks; 3542 conf->raid_disks += mddev->delta_disks; 3543 conf->expand_progress = 0; 3544 conf->expand_lo = 0; 3545 spin_unlock_irq(&conf->device_lock); 3546 3547 /* Add some new drives, as many as will fit. 3548 * We know there are enough to make the newly sized array work.
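 * Fewer spares than delta_disks is possible (the check above
 * allows running one short); the mddev->degraded calculation
 * below absorbs the shortfall, so the reshape proceeds into a
 * degraded array.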
3549 */ 3550 ITERATE_RDEV(mddev, rdev, rtmp) 3551 if (rdev->raid_disk < 0 && 3552 !test_bit(Faulty, &rdev->flags)) { 3553 if (raid5_add_disk(mddev, rdev)) { 3554 char nm[20]; 3555 set_bit(In_sync, &rdev->flags); 3556 conf->working_disks++; 3557 added_devices++; 3558 rdev->recovery_offset = 0; 3559 sprintf(nm, "rd%d", rdev->raid_disk); 3560 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 3561 } else 3562 break; 3563 } 3564 3565 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 3566 mddev->raid_disks = conf->raid_disks; 3567 mddev->reshape_position = 0; 3568 mddev->sb_dirty = 1; 3569 3570 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3571 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3572 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3573 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3574 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3575 "%s_reshape"); 3576 if (!mddev->sync_thread) { 3577 mddev->recovery = 0; 3578 spin_lock_irq(&conf->device_lock); 3579 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 3580 conf->expand_progress = MaxSector; 3581 spin_unlock_irq(&conf->device_lock); 3582 return -EAGAIN; 3583 } 3584 md_wakeup_thread(mddev->sync_thread); 3585 md_new_event(mddev); 3586 return 0; 3587} 3588#endif 3589 3590static void end_reshape(raid5_conf_t *conf) 3591{ 3592 struct block_device *bdev; 3593 3594 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 3595 conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1); 3596 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 3597 conf->mddev->changed = 1; 3598 3599 bdev = bdget_disk(conf->mddev->gendisk, 0); 3600 if (bdev) { 3601 mutex_lock(&bdev->bd_inode->i_mutex); 3602 i_size_write(bdev->bd_inode, conf->mddev->array_size << 10); 3603 mutex_unlock(&bdev->bd_inode->i_mutex); 3604 bdput(bdev); 3605 } 3606 spin_lock_irq(&conf->device_lock); 3607 conf->expand_progress = MaxSector; 3608 spin_unlock_irq(&conf->device_lock); 3609 conf->mddev->reshape_position = MaxSector; 3610 3611 /* read-ahead size must cover two whole stripes, which is 3612 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data (non-parity) devices 3613 */ 3614 { 3615 int data_disks = conf->previous_raid_disks - conf->max_degraded; 3616 int stripe = data_disks * 3617 (conf->mddev->chunk_size / PAGE_SIZE); 3618 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3619 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3620 } 3621 } 3622} 3623 3624static void raid5_quiesce(mddev_t *mddev, int state) 3625{ 3626 raid5_conf_t *conf = mddev_to_conf(mddev); 3627 3628 switch(state) { 3629 case 2: /* resume for a suspend */ 3630 wake_up(&conf->wait_for_overlap); 3631 break; 3632 3633 case 1: /* stop all writes */ 3634 spin_lock_irq(&conf->device_lock); 3635 conf->quiesce = 1; 3636 wait_event_lock_irq(conf->wait_for_stripe, 3637 atomic_read(&conf->active_stripes) == 0, 3638 conf->device_lock, /* nothing */); 3639 spin_unlock_irq(&conf->device_lock); 3640 break; 3641 3642 case 0: /* re-enable writes */ 3643 spin_lock_irq(&conf->device_lock); 3644 conf->quiesce = 0; 3645 wake_up(&conf->wait_for_stripe); 3646 wake_up(&conf->wait_for_overlap); 3647 spin_unlock_irq(&conf->device_lock); 3648 break; 3649 } 3650} 3651 3652static struct mdk_personality raid6_personality = 3653{ 3654 .name = "raid6", 3655 .level = 6, 3656 .owner = THIS_MODULE, 3657 .make_request = make_request, 3658 .run = run, 3659 .stop = stop, 3660 .status = status, 3661 .error_handler = error, 3662
.hot_add_disk = raid5_add_disk, 3663 .hot_remove_disk= raid5_remove_disk, 3664 .spare_active = raid5_spare_active, 3665 .sync_request = sync_request, 3666 .resize = raid5_resize, 3667 .quiesce = raid5_quiesce, 3668}; 3669static struct mdk_personality raid5_personality = 3670{ 3671 .name = "raid5", 3672 .level = 5, 3673 .owner = THIS_MODULE, 3674 .make_request = make_request, 3675 .run = run, 3676 .stop = stop, 3677 .status = status, 3678 .error_handler = error, 3679 .hot_add_disk = raid5_add_disk, 3680 .hot_remove_disk= raid5_remove_disk, 3681 .spare_active = raid5_spare_active, 3682 .sync_request = sync_request, 3683 .resize = raid5_resize, 3684#ifdef CONFIG_MD_RAID5_RESHAPE 3685 .check_reshape = raid5_check_reshape, 3686 .start_reshape = raid5_start_reshape, 3687#endif 3688 .quiesce = raid5_quiesce, 3689}; 3690 3691static struct mdk_personality raid4_personality = 3692{ 3693 .name = "raid4", 3694 .level = 4, 3695 .owner = THIS_MODULE, 3696 .make_request = make_request, 3697 .run = run, 3698 .stop = stop, 3699 .status = status, 3700 .error_handler = error, 3701 .hot_add_disk = raid5_add_disk, 3702 .hot_remove_disk= raid5_remove_disk, 3703 .spare_active = raid5_spare_active, 3704 .sync_request = sync_request, 3705 .resize = raid5_resize, 3706 .quiesce = raid5_quiesce, 3707}; 3708 3709static int __init raid5_init(void) 3710{ 3711 int e; 3712 3713 e = raid6_select_algo(); 3714 if ( e ) 3715 return e; 3716 register_md_personality(&raid6_personality); 3717 register_md_personality(&raid5_personality); 3718 register_md_personality(&raid4_personality); 3719 return 0; 3720} 3721 3722static void raid5_exit(void) 3723{ 3724 unregister_md_personality(&raid6_personality); 3725 unregister_md_personality(&raid5_personality); 3726 unregister_md_personality(&raid4_personality); 3727} 3728 3729module_init(raid5_init); 3730module_exit(raid5_exit); 3731MODULE_LICENSE("GPL"); 3732MODULE_ALIAS("md-personality-4"); /* RAID5 */ 3733MODULE_ALIAS("md-raid5"); 3734MODULE_ALIAS("md-raid4"); 3735MODULE_ALIAS("md-level-5"); 3736MODULE_ALIAS("md-level-4"); 3737MODULE_ALIAS("md-personality-8"); /* RAID6 */ 3738MODULE_ALIAS("md-raid6"); 3739MODULE_ALIAS("md-level-6"); 3740 3741/* This used to be two separate modules, they were: */ 3742MODULE_ALIAS("raid5"); 3743MODULE_ALIAS("raid6"); 3744