raid5.c revision d9d166c2a9d5d01af34396793950aa695883eed4
1/* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * 6 * RAID-5 management functions. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2, or (at your option) 11 * any later version. 12 * 13 * You should have received a copy of the GNU General Public License 14 * (for example /usr/src/linux/COPYING); if not, write to the Free 15 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 */ 17 18 19#include <linux/config.h> 20#include <linux/module.h> 21#include <linux/slab.h> 22#include <linux/raid/raid5.h> 23#include <linux/highmem.h> 24#include <linux/bitops.h> 25#include <asm/atomic.h> 26 27#include <linux/raid/bitmap.h> 28 29/* 30 * Stripe cache 31 */ 32 33#define NR_STRIPES 256 34#define STRIPE_SIZE PAGE_SIZE 35#define STRIPE_SHIFT (PAGE_SHIFT - 9) 36#define STRIPE_SECTORS (STRIPE_SIZE>>9) 37#define IO_THRESHOLD 1 38#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 39#define HASH_MASK (NR_HASH - 1) 40 41#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 42 43/* bio's attached to a stripe+device for I/O are linked together in bi_sector 44 * order without overlap. There may be several bio's per stripe+device, and 45 * a bio could span several devices. 46 * When walking this list for a particular stripe+device, we must never proceed 47 * beyond a bio that extends past this device, as the next bio might no longer 48 * be valid. 49 * This macro is used to determine the 'next' bio in the list, given the sector 50 * of the current stripe+device 51 */ 52#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL) 53/* 54 * The following can be used to debug the driver 55 */ 56#define RAID5_DEBUG 0 57#define RAID5_PARANOIA 1 58#if RAID5_PARANOIA && defined(CONFIG_SMP) 59# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 60#else 61# define CHECK_DEVLOCK() 62#endif 63 64#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x))) 65#if RAID5_DEBUG 66#define inline 67#define __inline__ 68#endif 69 70static void print_raid5_conf (raid5_conf_t *conf); 71 72static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 73{ 74 if (atomic_dec_and_test(&sh->count)) { 75 if (!list_empty(&sh->lru)) 76 BUG(); 77 if (atomic_read(&conf->active_stripes)==0) 78 BUG(); 79 if (test_bit(STRIPE_HANDLE, &sh->state)) { 80 if (test_bit(STRIPE_DELAYED, &sh->state)) 81 list_add_tail(&sh->lru, &conf->delayed_list); 82 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 83 conf->seq_write == sh->bm_seq) 84 list_add_tail(&sh->lru, &conf->bitmap_list); 85 else { 86 clear_bit(STRIPE_BIT_DELAY, &sh->state); 87 list_add_tail(&sh->lru, &conf->handle_list); 88 } 89 md_wakeup_thread(conf->mddev->thread); 90 } else { 91 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 92 atomic_dec(&conf->preread_active_stripes); 93 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 94 md_wakeup_thread(conf->mddev->thread); 95 } 96 list_add_tail(&sh->lru, &conf->inactive_list); 97 atomic_dec(&conf->active_stripes); 98 if (!conf->inactive_blocked || 99 atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4)) 100 wake_up(&conf->wait_for_stripe); 101 } 102 } 103} 104static void release_stripe(struct stripe_head *sh) 105{ 106 raid5_conf_t *conf = sh->raid_conf; 107 unsigned long flags; 108 109 spin_lock_irqsave(&conf->device_lock, flags); 110 __release_stripe(conf, sh); 111 spin_unlock_irqrestore(&conf->device_lock, flags); 112} 113 114static inline void remove_hash(struct stripe_head *sh) 115{ 116 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 117 118 hlist_del_init(&sh->hash); 119} 120 121static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 122{ 123 struct hlist_head *hp = stripe_hash(conf, sh->sector); 124 125 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 126 127 CHECK_DEVLOCK(); 128 hlist_add_head(&sh->hash, hp); 129} 130 131 132/* find an idle stripe, make sure it is unhashed, and return it. */ 133static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 134{ 135 struct stripe_head *sh = NULL; 136 struct list_head *first; 137 138 CHECK_DEVLOCK(); 139 if (list_empty(&conf->inactive_list)) 140 goto out; 141 first = conf->inactive_list.next; 142 sh = list_entry(first, struct stripe_head, lru); 143 list_del_init(first); 144 remove_hash(sh); 145 atomic_inc(&conf->active_stripes); 146out: 147 return sh; 148} 149 150static void shrink_buffers(struct stripe_head *sh, int num) 151{ 152 struct page *p; 153 int i; 154 155 for (i=0; i<num ; i++) { 156 p = sh->dev[i].page; 157 if (!p) 158 continue; 159 sh->dev[i].page = NULL; 160 put_page(p); 161 } 162} 163 164static int grow_buffers(struct stripe_head *sh, int num) 165{ 166 int i; 167 168 for (i=0; i<num; i++) { 169 struct page *page; 170 171 if (!(page = alloc_page(GFP_KERNEL))) { 172 return 1; 173 } 174 sh->dev[i].page = page; 175 } 176 return 0; 177} 178 179static void raid5_build_block (struct stripe_head *sh, int i); 180 181static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 182{ 183 raid5_conf_t *conf = sh->raid_conf; 184 int disks = conf->raid_disks, i; 185 186 if (atomic_read(&sh->count) != 0) 187 BUG(); 188 if (test_bit(STRIPE_HANDLE, &sh->state)) 189 BUG(); 190 191 CHECK_DEVLOCK(); 192 PRINTK("init_stripe called, stripe %llu\n", 193 (unsigned long long)sh->sector); 194 195 remove_hash(sh); 196 197 sh->sector = sector; 198 sh->pd_idx = pd_idx; 199 sh->state = 0; 200 201 for (i=disks; i--; ) { 202 struct r5dev *dev = &sh->dev[i]; 203 204 if (dev->toread || dev->towrite || dev->written || 205 test_bit(R5_LOCKED, &dev->flags)) { 206 printk("sector=%llx i=%d %p %p %p %d\n", 207 (unsigned long long)sh->sector, i, dev->toread, 208 dev->towrite, dev->written, 209 test_bit(R5_LOCKED, &dev->flags)); 210 BUG(); 211 } 212 dev->flags = 0; 213 raid5_build_block(sh, i); 214 } 215 insert_hash(conf, sh); 216} 217 218static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) 219{ 220 struct stripe_head *sh; 221 struct hlist_node *hn; 222 223 CHECK_DEVLOCK(); 224 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 225 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 226 if (sh->sector == sector) 227 return sh; 228 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 229 return NULL; 230} 231 232static void unplug_slaves(mddev_t *mddev); 233static void raid5_unplug_device(request_queue_t *q); 234 235static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, 236 int pd_idx, int noblock) 237{ 238 struct stripe_head *sh; 239 240 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector); 241 242 spin_lock_irq(&conf->device_lock); 243 244 do { 245 wait_event_lock_irq(conf->wait_for_stripe, 246 conf->quiesce == 0, 247 conf->device_lock, /* nothing */); 248 sh = __find_stripe(conf, sector); 249 if (!sh) { 250 if (!conf->inactive_blocked) 251 sh = get_free_stripe(conf); 252 if (noblock && sh == NULL) 253 break; 254 if (!sh) { 255 conf->inactive_blocked = 1; 256 wait_event_lock_irq(conf->wait_for_stripe, 257 !list_empty(&conf->inactive_list) && 258 (atomic_read(&conf->active_stripes) 259 < (conf->max_nr_stripes *3/4) 260 || !conf->inactive_blocked), 261 conf->device_lock, 262 unplug_slaves(conf->mddev); 263 ); 264 conf->inactive_blocked = 0; 265 } else 266 init_stripe(sh, sector, pd_idx); 267 } else { 268 if (atomic_read(&sh->count)) { 269 if (!list_empty(&sh->lru)) 270 BUG(); 271 } else { 272 if (!test_bit(STRIPE_HANDLE, &sh->state)) 273 atomic_inc(&conf->active_stripes); 274 if (list_empty(&sh->lru)) 275 BUG(); 276 list_del_init(&sh->lru); 277 } 278 } 279 } while (sh == NULL); 280 281 if (sh) 282 atomic_inc(&sh->count); 283 284 spin_unlock_irq(&conf->device_lock); 285 return sh; 286} 287 288static int grow_one_stripe(raid5_conf_t *conf) 289{ 290 struct stripe_head *sh; 291 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 292 if (!sh) 293 return 0; 294 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 295 sh->raid_conf = conf; 296 spin_lock_init(&sh->lock); 297 298 if (grow_buffers(sh, conf->raid_disks)) { 299 shrink_buffers(sh, conf->raid_disks); 300 kmem_cache_free(conf->slab_cache, sh); 301 return 0; 302 } 303 /* we just created an active stripe so... */ 304 atomic_set(&sh->count, 1); 305 atomic_inc(&conf->active_stripes); 306 INIT_LIST_HEAD(&sh->lru); 307 release_stripe(sh); 308 return 1; 309} 310 311static int grow_stripes(raid5_conf_t *conf, int num) 312{ 313 kmem_cache_t *sc; 314 int devs = conf->raid_disks; 315 316 sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev)); 317 318 sc = kmem_cache_create(conf->cache_name, 319 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 320 0, 0, NULL, NULL); 321 if (!sc) 322 return 1; 323 conf->slab_cache = sc; 324 while (num--) { 325 if (!grow_one_stripe(conf)) 326 return 1; 327 } 328 return 0; 329} 330 331static int drop_one_stripe(raid5_conf_t *conf) 332{ 333 struct stripe_head *sh; 334 335 spin_lock_irq(&conf->device_lock); 336 sh = get_free_stripe(conf); 337 spin_unlock_irq(&conf->device_lock); 338 if (!sh) 339 return 0; 340 if (atomic_read(&sh->count)) 341 BUG(); 342 shrink_buffers(sh, conf->raid_disks); 343 kmem_cache_free(conf->slab_cache, sh); 344 atomic_dec(&conf->active_stripes); 345 return 1; 346} 347 348static void shrink_stripes(raid5_conf_t *conf) 349{ 350 while (drop_one_stripe(conf)) 351 ; 352 353 kmem_cache_destroy(conf->slab_cache); 354 conf->slab_cache = NULL; 355} 356 357static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 358 int error) 359{ 360 struct stripe_head *sh = bi->bi_private; 361 raid5_conf_t *conf = sh->raid_conf; 362 int disks = conf->raid_disks, i; 363 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 364 365 if (bi->bi_size) 366 return 1; 367 368 for (i=0 ; i<disks; i++) 369 if (bi == &sh->dev[i].req) 370 break; 371 372 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", 373 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 374 uptodate); 375 if (i == disks) { 376 BUG(); 377 return 0; 378 } 379 380 if (uptodate) { 381#if 0 382 struct bio *bio; 383 unsigned long flags; 384 spin_lock_irqsave(&conf->device_lock, flags); 385 /* we can return a buffer if we bypassed the cache or 386 * if the top buffer is not in highmem. If there are 387 * multiple buffers, leave the extra work to 388 * handle_stripe 389 */ 390 buffer = sh->bh_read[i]; 391 if (buffer && 392 (!PageHighMem(buffer->b_page) 393 || buffer->b_page == bh->b_page ) 394 ) { 395 sh->bh_read[i] = buffer->b_reqnext; 396 buffer->b_reqnext = NULL; 397 } else 398 buffer = NULL; 399 spin_unlock_irqrestore(&conf->device_lock, flags); 400 if (sh->bh_page[i]==bh->b_page) 401 set_buffer_uptodate(bh); 402 if (buffer) { 403 if (buffer->b_page != bh->b_page) 404 memcpy(buffer->b_data, bh->b_data, bh->b_size); 405 buffer->b_end_io(buffer, 1); 406 } 407#else 408 set_bit(R5_UPTODATE, &sh->dev[i].flags); 409#endif 410 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 411 printk(KERN_INFO "raid5: read error corrected!!\n"); 412 clear_bit(R5_ReadError, &sh->dev[i].flags); 413 clear_bit(R5_ReWrite, &sh->dev[i].flags); 414 } 415 if (atomic_read(&conf->disks[i].rdev->read_errors)) 416 atomic_set(&conf->disks[i].rdev->read_errors, 0); 417 } else { 418 int retry = 0; 419 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 420 atomic_inc(&conf->disks[i].rdev->read_errors); 421 if (conf->mddev->degraded) 422 printk(KERN_WARNING "raid5: read error not correctable.\n"); 423 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 424 /* Oh, no!!! */ 425 printk(KERN_WARNING "raid5: read error NOT corrected!!\n"); 426 else if (atomic_read(&conf->disks[i].rdev->read_errors) 427 > conf->max_nr_stripes) 428 printk(KERN_WARNING 429 "raid5: Too many read errors, failing device.\n"); 430 else 431 retry = 1; 432 if (retry) 433 set_bit(R5_ReadError, &sh->dev[i].flags); 434 else { 435 clear_bit(R5_ReadError, &sh->dev[i].flags); 436 clear_bit(R5_ReWrite, &sh->dev[i].flags); 437 md_error(conf->mddev, conf->disks[i].rdev); 438 } 439 } 440 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 441#if 0 442 /* must restore b_page before unlocking buffer... */ 443 if (sh->bh_page[i] != bh->b_page) { 444 bh->b_page = sh->bh_page[i]; 445 bh->b_data = page_address(bh->b_page); 446 clear_buffer_uptodate(bh); 447 } 448#endif 449 clear_bit(R5_LOCKED, &sh->dev[i].flags); 450 set_bit(STRIPE_HANDLE, &sh->state); 451 release_stripe(sh); 452 return 0; 453} 454 455static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 456 int error) 457{ 458 struct stripe_head *sh = bi->bi_private; 459 raid5_conf_t *conf = sh->raid_conf; 460 int disks = conf->raid_disks, i; 461 unsigned long flags; 462 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 463 464 if (bi->bi_size) 465 return 1; 466 467 for (i=0 ; i<disks; i++) 468 if (bi == &sh->dev[i].req) 469 break; 470 471 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", 472 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 473 uptodate); 474 if (i == disks) { 475 BUG(); 476 return 0; 477 } 478 479 spin_lock_irqsave(&conf->device_lock, flags); 480 if (!uptodate) 481 md_error(conf->mddev, conf->disks[i].rdev); 482 483 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 484 485 clear_bit(R5_LOCKED, &sh->dev[i].flags); 486 set_bit(STRIPE_HANDLE, &sh->state); 487 __release_stripe(conf, sh); 488 spin_unlock_irqrestore(&conf->device_lock, flags); 489 return 0; 490} 491 492 493static sector_t compute_blocknr(struct stripe_head *sh, int i); 494 495static void raid5_build_block (struct stripe_head *sh, int i) 496{ 497 struct r5dev *dev = &sh->dev[i]; 498 499 bio_init(&dev->req); 500 dev->req.bi_io_vec = &dev->vec; 501 dev->req.bi_vcnt++; 502 dev->req.bi_max_vecs++; 503 dev->vec.bv_page = dev->page; 504 dev->vec.bv_len = STRIPE_SIZE; 505 dev->vec.bv_offset = 0; 506 507 dev->req.bi_sector = sh->sector; 508 dev->req.bi_private = sh; 509 510 dev->flags = 0; 511 if (i != sh->pd_idx) 512 dev->sector = compute_blocknr(sh, i); 513} 514 515static void error(mddev_t *mddev, mdk_rdev_t *rdev) 516{ 517 char b[BDEVNAME_SIZE]; 518 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 519 PRINTK("raid5: error called\n"); 520 521 if (!test_bit(Faulty, &rdev->flags)) { 522 mddev->sb_dirty = 1; 523 if (test_bit(In_sync, &rdev->flags)) { 524 conf->working_disks--; 525 mddev->degraded++; 526 conf->failed_disks++; 527 clear_bit(In_sync, &rdev->flags); 528 /* 529 * if recovery was running, make sure it aborts. 530 */ 531 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 532 } 533 set_bit(Faulty, &rdev->flags); 534 printk (KERN_ALERT 535 "raid5: Disk failure on %s, disabling device." 536 " Operation continuing on %d devices\n", 537 bdevname(rdev->bdev,b), conf->working_disks); 538 } 539} 540 541/* 542 * Input: a 'big' sector number, 543 * Output: index of the data and parity disk, and the sector # in them. 544 */ 545static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 546 unsigned int data_disks, unsigned int * dd_idx, 547 unsigned int * pd_idx, raid5_conf_t *conf) 548{ 549 long stripe; 550 unsigned long chunk_number; 551 unsigned int chunk_offset; 552 sector_t new_sector; 553 int sectors_per_chunk = conf->chunk_size >> 9; 554 555 /* First compute the information on this sector */ 556 557 /* 558 * Compute the chunk number and the sector offset inside the chunk 559 */ 560 chunk_offset = sector_div(r_sector, sectors_per_chunk); 561 chunk_number = r_sector; 562 BUG_ON(r_sector != chunk_number); 563 564 /* 565 * Compute the stripe number 566 */ 567 stripe = chunk_number / data_disks; 568 569 /* 570 * Compute the data disk and parity disk indexes inside the stripe 571 */ 572 *dd_idx = chunk_number % data_disks; 573 574 /* 575 * Select the parity disk based on the user selected algorithm. 576 */ 577 if (conf->level == 4) 578 *pd_idx = data_disks; 579 else switch (conf->algorithm) { 580 case ALGORITHM_LEFT_ASYMMETRIC: 581 *pd_idx = data_disks - stripe % raid_disks; 582 if (*dd_idx >= *pd_idx) 583 (*dd_idx)++; 584 break; 585 case ALGORITHM_RIGHT_ASYMMETRIC: 586 *pd_idx = stripe % raid_disks; 587 if (*dd_idx >= *pd_idx) 588 (*dd_idx)++; 589 break; 590 case ALGORITHM_LEFT_SYMMETRIC: 591 *pd_idx = data_disks - stripe % raid_disks; 592 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 593 break; 594 case ALGORITHM_RIGHT_SYMMETRIC: 595 *pd_idx = stripe % raid_disks; 596 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 597 break; 598 default: 599 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 600 conf->algorithm); 601 } 602 603 /* 604 * Finally, compute the new sector number 605 */ 606 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 607 return new_sector; 608} 609 610 611static sector_t compute_blocknr(struct stripe_head *sh, int i) 612{ 613 raid5_conf_t *conf = sh->raid_conf; 614 int raid_disks = conf->raid_disks, data_disks = raid_disks - 1; 615 sector_t new_sector = sh->sector, check; 616 int sectors_per_chunk = conf->chunk_size >> 9; 617 sector_t stripe; 618 int chunk_offset; 619 int chunk_number, dummy1, dummy2, dd_idx = i; 620 sector_t r_sector; 621 622 chunk_offset = sector_div(new_sector, sectors_per_chunk); 623 stripe = new_sector; 624 BUG_ON(new_sector != stripe); 625 626 627 switch (conf->algorithm) { 628 case ALGORITHM_LEFT_ASYMMETRIC: 629 case ALGORITHM_RIGHT_ASYMMETRIC: 630 if (i > sh->pd_idx) 631 i--; 632 break; 633 case ALGORITHM_LEFT_SYMMETRIC: 634 case ALGORITHM_RIGHT_SYMMETRIC: 635 if (i < sh->pd_idx) 636 i += raid_disks; 637 i -= (sh->pd_idx + 1); 638 break; 639 default: 640 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 641 conf->algorithm); 642 } 643 644 chunk_number = stripe * data_disks + i; 645 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 646 647 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 648 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 649 printk(KERN_ERR "compute_blocknr: map not correct\n"); 650 return 0; 651 } 652 return r_sector; 653} 654 655 656 657/* 658 * Copy data between a page in the stripe cache, and a bio. 659 * There are no alignment or size guarantees between the page or the 660 * bio except that there is some overlap. 661 * All iovecs in the bio must be considered. 662 */ 663static void copy_data(int frombio, struct bio *bio, 664 struct page *page, 665 sector_t sector) 666{ 667 char *pa = page_address(page); 668 struct bio_vec *bvl; 669 int i; 670 int page_offset; 671 672 if (bio->bi_sector >= sector) 673 page_offset = (signed)(bio->bi_sector - sector) * 512; 674 else 675 page_offset = (signed)(sector - bio->bi_sector) * -512; 676 bio_for_each_segment(bvl, bio, i) { 677 int len = bio_iovec_idx(bio,i)->bv_len; 678 int clen; 679 int b_offset = 0; 680 681 if (page_offset < 0) { 682 b_offset = -page_offset; 683 page_offset += b_offset; 684 len -= b_offset; 685 } 686 687 if (len > 0 && page_offset + len > STRIPE_SIZE) 688 clen = STRIPE_SIZE - page_offset; 689 else clen = len; 690 691 if (clen > 0) { 692 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 693 if (frombio) 694 memcpy(pa+page_offset, ba+b_offset, clen); 695 else 696 memcpy(ba+b_offset, pa+page_offset, clen); 697 __bio_kunmap_atomic(ba, KM_USER0); 698 } 699 if (clen < len) /* hit end of page */ 700 break; 701 page_offset += len; 702 } 703} 704 705#define check_xor() do { \ 706 if (count == MAX_XOR_BLOCKS) { \ 707 xor_block(count, STRIPE_SIZE, ptr); \ 708 count = 1; \ 709 } \ 710 } while(0) 711 712 713static void compute_block(struct stripe_head *sh, int dd_idx) 714{ 715 raid5_conf_t *conf = sh->raid_conf; 716 int i, count, disks = conf->raid_disks; 717 void *ptr[MAX_XOR_BLOCKS], *p; 718 719 PRINTK("compute_block, stripe %llu, idx %d\n", 720 (unsigned long long)sh->sector, dd_idx); 721 722 ptr[0] = page_address(sh->dev[dd_idx].page); 723 memset(ptr[0], 0, STRIPE_SIZE); 724 count = 1; 725 for (i = disks ; i--; ) { 726 if (i == dd_idx) 727 continue; 728 p = page_address(sh->dev[i].page); 729 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 730 ptr[count++] = p; 731 else 732 printk(KERN_ERR "compute_block() %d, stripe %llu, %d" 733 " not present\n", dd_idx, 734 (unsigned long long)sh->sector, i); 735 736 check_xor(); 737 } 738 if (count != 1) 739 xor_block(count, STRIPE_SIZE, ptr); 740 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 741} 742 743static void compute_parity(struct stripe_head *sh, int method) 744{ 745 raid5_conf_t *conf = sh->raid_conf; 746 int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count; 747 void *ptr[MAX_XOR_BLOCKS]; 748 struct bio *chosen; 749 750 PRINTK("compute_parity, stripe %llu, method %d\n", 751 (unsigned long long)sh->sector, method); 752 753 count = 1; 754 ptr[0] = page_address(sh->dev[pd_idx].page); 755 switch(method) { 756 case READ_MODIFY_WRITE: 757 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags)) 758 BUG(); 759 for (i=disks ; i-- ;) { 760 if (i==pd_idx) 761 continue; 762 if (sh->dev[i].towrite && 763 test_bit(R5_UPTODATE, &sh->dev[i].flags)) { 764 ptr[count++] = page_address(sh->dev[i].page); 765 chosen = sh->dev[i].towrite; 766 sh->dev[i].towrite = NULL; 767 768 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 769 wake_up(&conf->wait_for_overlap); 770 771 if (sh->dev[i].written) BUG(); 772 sh->dev[i].written = chosen; 773 check_xor(); 774 } 775 } 776 break; 777 case RECONSTRUCT_WRITE: 778 memset(ptr[0], 0, STRIPE_SIZE); 779 for (i= disks; i-- ;) 780 if (i!=pd_idx && sh->dev[i].towrite) { 781 chosen = sh->dev[i].towrite; 782 sh->dev[i].towrite = NULL; 783 784 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 785 wake_up(&conf->wait_for_overlap); 786 787 if (sh->dev[i].written) BUG(); 788 sh->dev[i].written = chosen; 789 } 790 break; 791 case CHECK_PARITY: 792 break; 793 } 794 if (count>1) { 795 xor_block(count, STRIPE_SIZE, ptr); 796 count = 1; 797 } 798 799 for (i = disks; i--;) 800 if (sh->dev[i].written) { 801 sector_t sector = sh->dev[i].sector; 802 struct bio *wbi = sh->dev[i].written; 803 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 804 copy_data(1, wbi, sh->dev[i].page, sector); 805 wbi = r5_next_bio(wbi, sector); 806 } 807 808 set_bit(R5_LOCKED, &sh->dev[i].flags); 809 set_bit(R5_UPTODATE, &sh->dev[i].flags); 810 } 811 812 switch(method) { 813 case RECONSTRUCT_WRITE: 814 case CHECK_PARITY: 815 for (i=disks; i--;) 816 if (i != pd_idx) { 817 ptr[count++] = page_address(sh->dev[i].page); 818 check_xor(); 819 } 820 break; 821 case READ_MODIFY_WRITE: 822 for (i = disks; i--;) 823 if (sh->dev[i].written) { 824 ptr[count++] = page_address(sh->dev[i].page); 825 check_xor(); 826 } 827 } 828 if (count != 1) 829 xor_block(count, STRIPE_SIZE, ptr); 830 831 if (method != CHECK_PARITY) { 832 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 833 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 834 } else 835 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 836} 837 838/* 839 * Each stripe/dev can have one or more bion attached. 840 * toread/towrite point to the first in a chain. 841 * The bi_next chain must be in order. 842 */ 843static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 844{ 845 struct bio **bip; 846 raid5_conf_t *conf = sh->raid_conf; 847 int firstwrite=0; 848 849 PRINTK("adding bh b#%llu to stripe s#%llu\n", 850 (unsigned long long)bi->bi_sector, 851 (unsigned long long)sh->sector); 852 853 854 spin_lock(&sh->lock); 855 spin_lock_irq(&conf->device_lock); 856 if (forwrite) { 857 bip = &sh->dev[dd_idx].towrite; 858 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 859 firstwrite = 1; 860 } else 861 bip = &sh->dev[dd_idx].toread; 862 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 863 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 864 goto overlap; 865 bip = & (*bip)->bi_next; 866 } 867 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 868 goto overlap; 869 870 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 871 BUG(); 872 if (*bip) 873 bi->bi_next = *bip; 874 *bip = bi; 875 bi->bi_phys_segments ++; 876 spin_unlock_irq(&conf->device_lock); 877 spin_unlock(&sh->lock); 878 879 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n", 880 (unsigned long long)bi->bi_sector, 881 (unsigned long long)sh->sector, dd_idx); 882 883 if (conf->mddev->bitmap && firstwrite) { 884 sh->bm_seq = conf->seq_write; 885 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 886 STRIPE_SECTORS, 0); 887 set_bit(STRIPE_BIT_DELAY, &sh->state); 888 } 889 890 if (forwrite) { 891 /* check if page is covered */ 892 sector_t sector = sh->dev[dd_idx].sector; 893 for (bi=sh->dev[dd_idx].towrite; 894 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 895 bi && bi->bi_sector <= sector; 896 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 897 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 898 sector = bi->bi_sector + (bi->bi_size>>9); 899 } 900 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 901 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 902 } 903 return 1; 904 905 overlap: 906 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 907 spin_unlock_irq(&conf->device_lock); 908 spin_unlock(&sh->lock); 909 return 0; 910} 911 912 913/* 914 * handle_stripe - do things to a stripe. 915 * 916 * We lock the stripe and then examine the state of various bits 917 * to see what needs to be done. 918 * Possible results: 919 * return some read request which now have data 920 * return some write requests which are safely on disc 921 * schedule a read on some buffers 922 * schedule a write of some buffers 923 * return confirmation of parity correctness 924 * 925 * Parity calculations are done inside the stripe lock 926 * buffers are taken off read_list or write_list, and bh_cache buffers 927 * get BH_Lock set before the stripe lock is released. 928 * 929 */ 930 931static void handle_stripe(struct stripe_head *sh) 932{ 933 raid5_conf_t *conf = sh->raid_conf; 934 int disks = conf->raid_disks; 935 struct bio *return_bi= NULL; 936 struct bio *bi; 937 int i; 938 int syncing; 939 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 940 int non_overwrite = 0; 941 int failed_num=0; 942 struct r5dev *dev; 943 944 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n", 945 (unsigned long long)sh->sector, atomic_read(&sh->count), 946 sh->pd_idx); 947 948 spin_lock(&sh->lock); 949 clear_bit(STRIPE_HANDLE, &sh->state); 950 clear_bit(STRIPE_DELAYED, &sh->state); 951 952 syncing = test_bit(STRIPE_SYNCING, &sh->state); 953 /* Now to look around and see what can be done */ 954 955 rcu_read_lock(); 956 for (i=disks; i--; ) { 957 mdk_rdev_t *rdev; 958 dev = &sh->dev[i]; 959 clear_bit(R5_Insync, &dev->flags); 960 961 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 962 i, dev->flags, dev->toread, dev->towrite, dev->written); 963 /* maybe we can reply to a read */ 964 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 965 struct bio *rbi, *rbi2; 966 PRINTK("Return read for disc %d\n", i); 967 spin_lock_irq(&conf->device_lock); 968 rbi = dev->toread; 969 dev->toread = NULL; 970 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 971 wake_up(&conf->wait_for_overlap); 972 spin_unlock_irq(&conf->device_lock); 973 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 974 copy_data(0, rbi, dev->page, dev->sector); 975 rbi2 = r5_next_bio(rbi, dev->sector); 976 spin_lock_irq(&conf->device_lock); 977 if (--rbi->bi_phys_segments == 0) { 978 rbi->bi_next = return_bi; 979 return_bi = rbi; 980 } 981 spin_unlock_irq(&conf->device_lock); 982 rbi = rbi2; 983 } 984 } 985 986 /* now count some things */ 987 if (test_bit(R5_LOCKED, &dev->flags)) locked++; 988 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; 989 990 991 if (dev->toread) to_read++; 992 if (dev->towrite) { 993 to_write++; 994 if (!test_bit(R5_OVERWRITE, &dev->flags)) 995 non_overwrite++; 996 } 997 if (dev->written) written++; 998 rdev = rcu_dereference(conf->disks[i].rdev); 999 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1000 /* The ReadError flag will just be confusing now */ 1001 clear_bit(R5_ReadError, &dev->flags); 1002 clear_bit(R5_ReWrite, &dev->flags); 1003 } 1004 if (!rdev || !test_bit(In_sync, &rdev->flags) 1005 || test_bit(R5_ReadError, &dev->flags)) { 1006 failed++; 1007 failed_num = i; 1008 } else 1009 set_bit(R5_Insync, &dev->flags); 1010 } 1011 rcu_read_unlock(); 1012 PRINTK("locked=%d uptodate=%d to_read=%d" 1013 " to_write=%d failed=%d failed_num=%d\n", 1014 locked, uptodate, to_read, to_write, failed, failed_num); 1015 /* check if the array has lost two devices and, if so, some requests might 1016 * need to be failed 1017 */ 1018 if (failed > 1 && to_read+to_write+written) { 1019 for (i=disks; i--; ) { 1020 int bitmap_end = 0; 1021 1022 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1023 mdk_rdev_t *rdev; 1024 rcu_read_lock(); 1025 rdev = rcu_dereference(conf->disks[i].rdev); 1026 if (rdev && test_bit(In_sync, &rdev->flags)) 1027 /* multiple read failures in one stripe */ 1028 md_error(conf->mddev, rdev); 1029 rcu_read_unlock(); 1030 } 1031 1032 spin_lock_irq(&conf->device_lock); 1033 /* fail all writes first */ 1034 bi = sh->dev[i].towrite; 1035 sh->dev[i].towrite = NULL; 1036 if (bi) { to_write--; bitmap_end = 1; } 1037 1038 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1039 wake_up(&conf->wait_for_overlap); 1040 1041 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1042 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1043 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1044 if (--bi->bi_phys_segments == 0) { 1045 md_write_end(conf->mddev); 1046 bi->bi_next = return_bi; 1047 return_bi = bi; 1048 } 1049 bi = nextbi; 1050 } 1051 /* and fail all 'written' */ 1052 bi = sh->dev[i].written; 1053 sh->dev[i].written = NULL; 1054 if (bi) bitmap_end = 1; 1055 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { 1056 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1057 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1058 if (--bi->bi_phys_segments == 0) { 1059 md_write_end(conf->mddev); 1060 bi->bi_next = return_bi; 1061 return_bi = bi; 1062 } 1063 bi = bi2; 1064 } 1065 1066 /* fail any reads if this device is non-operational */ 1067 if (!test_bit(R5_Insync, &sh->dev[i].flags) || 1068 test_bit(R5_ReadError, &sh->dev[i].flags)) { 1069 bi = sh->dev[i].toread; 1070 sh->dev[i].toread = NULL; 1071 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1072 wake_up(&conf->wait_for_overlap); 1073 if (bi) to_read--; 1074 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1075 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1076 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1077 if (--bi->bi_phys_segments == 0) { 1078 bi->bi_next = return_bi; 1079 return_bi = bi; 1080 } 1081 bi = nextbi; 1082 } 1083 } 1084 spin_unlock_irq(&conf->device_lock); 1085 if (bitmap_end) 1086 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1087 STRIPE_SECTORS, 0, 0); 1088 } 1089 } 1090 if (failed > 1 && syncing) { 1091 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 1092 clear_bit(STRIPE_SYNCING, &sh->state); 1093 syncing = 0; 1094 } 1095 1096 /* might be able to return some write requests if the parity block 1097 * is safe, or on a failed drive 1098 */ 1099 dev = &sh->dev[sh->pd_idx]; 1100 if ( written && 1101 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) && 1102 test_bit(R5_UPTODATE, &dev->flags)) 1103 || (failed == 1 && failed_num == sh->pd_idx)) 1104 ) { 1105 /* any written block on an uptodate or failed drive can be returned. 1106 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 1107 * never LOCKED, so we don't need to test 'failed' directly. 1108 */ 1109 for (i=disks; i--; ) 1110 if (sh->dev[i].written) { 1111 dev = &sh->dev[i]; 1112 if (!test_bit(R5_LOCKED, &dev->flags) && 1113 test_bit(R5_UPTODATE, &dev->flags) ) { 1114 /* We can return any write requests */ 1115 struct bio *wbi, *wbi2; 1116 int bitmap_end = 0; 1117 PRINTK("Return write for disc %d\n", i); 1118 spin_lock_irq(&conf->device_lock); 1119 wbi = dev->written; 1120 dev->written = NULL; 1121 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1122 wbi2 = r5_next_bio(wbi, dev->sector); 1123 if (--wbi->bi_phys_segments == 0) { 1124 md_write_end(conf->mddev); 1125 wbi->bi_next = return_bi; 1126 return_bi = wbi; 1127 } 1128 wbi = wbi2; 1129 } 1130 if (dev->towrite == NULL) 1131 bitmap_end = 1; 1132 spin_unlock_irq(&conf->device_lock); 1133 if (bitmap_end) 1134 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1135 STRIPE_SECTORS, 1136 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 1137 } 1138 } 1139 } 1140 1141 /* Now we might consider reading some blocks, either to check/generate 1142 * parity, or to satisfy requests 1143 * or to load a block that is being partially written. 1144 */ 1145 if (to_read || non_overwrite || (syncing && (uptodate < disks))) { 1146 for (i=disks; i--;) { 1147 dev = &sh->dev[i]; 1148 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1149 (dev->toread || 1150 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1151 syncing || 1152 (failed && (sh->dev[failed_num].toread || 1153 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) 1154 ) 1155 ) { 1156 /* we would like to get this block, possibly 1157 * by computing it, but we might not be able to 1158 */ 1159 if (uptodate == disks-1) { 1160 PRINTK("Computing block %d\n", i); 1161 compute_block(sh, i); 1162 uptodate++; 1163 } else if (test_bit(R5_Insync, &dev->flags)) { 1164 set_bit(R5_LOCKED, &dev->flags); 1165 set_bit(R5_Wantread, &dev->flags); 1166#if 0 1167 /* if I am just reading this block and we don't have 1168 a failed drive, or any pending writes then sidestep the cache */ 1169 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 1170 ! syncing && !failed && !to_write) { 1171 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; 1172 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; 1173 } 1174#endif 1175 locked++; 1176 PRINTK("Reading block %d (sync=%d)\n", 1177 i, syncing); 1178 } 1179 } 1180 } 1181 set_bit(STRIPE_HANDLE, &sh->state); 1182 } 1183 1184 /* now to consider writing and what else, if anything should be read */ 1185 if (to_write) { 1186 int rmw=0, rcw=0; 1187 for (i=disks ; i--;) { 1188 /* would I have to read this buffer for read_modify_write */ 1189 dev = &sh->dev[i]; 1190 if ((dev->towrite || i == sh->pd_idx) && 1191 (!test_bit(R5_LOCKED, &dev->flags) 1192#if 0 1193|| sh->bh_page[i]!=bh->b_page 1194#endif 1195 ) && 1196 !test_bit(R5_UPTODATE, &dev->flags)) { 1197 if (test_bit(R5_Insync, &dev->flags) 1198/* && !(!mddev->insync && i == sh->pd_idx) */ 1199 ) 1200 rmw++; 1201 else rmw += 2*disks; /* cannot read it */ 1202 } 1203 /* Would I have to read this buffer for reconstruct_write */ 1204 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 1205 (!test_bit(R5_LOCKED, &dev->flags) 1206#if 0 1207|| sh->bh_page[i] != bh->b_page 1208#endif 1209 ) && 1210 !test_bit(R5_UPTODATE, &dev->flags)) { 1211 if (test_bit(R5_Insync, &dev->flags)) rcw++; 1212 else rcw += 2*disks; 1213 } 1214 } 1215 PRINTK("for sector %llu, rmw=%d rcw=%d\n", 1216 (unsigned long long)sh->sector, rmw, rcw); 1217 set_bit(STRIPE_HANDLE, &sh->state); 1218 if (rmw < rcw && rmw > 0) 1219 /* prefer read-modify-write, but need to get some data */ 1220 for (i=disks; i--;) { 1221 dev = &sh->dev[i]; 1222 if ((dev->towrite || i == sh->pd_idx) && 1223 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1224 test_bit(R5_Insync, &dev->flags)) { 1225 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1226 { 1227 PRINTK("Read_old block %d for r-m-w\n", i); 1228 set_bit(R5_LOCKED, &dev->flags); 1229 set_bit(R5_Wantread, &dev->flags); 1230 locked++; 1231 } else { 1232 set_bit(STRIPE_DELAYED, &sh->state); 1233 set_bit(STRIPE_HANDLE, &sh->state); 1234 } 1235 } 1236 } 1237 if (rcw <= rmw && rcw > 0) 1238 /* want reconstruct write, but need to get some data */ 1239 for (i=disks; i--;) { 1240 dev = &sh->dev[i]; 1241 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 1242 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1243 test_bit(R5_Insync, &dev->flags)) { 1244 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1245 { 1246 PRINTK("Read_old block %d for Reconstruct\n", i); 1247 set_bit(R5_LOCKED, &dev->flags); 1248 set_bit(R5_Wantread, &dev->flags); 1249 locked++; 1250 } else { 1251 set_bit(STRIPE_DELAYED, &sh->state); 1252 set_bit(STRIPE_HANDLE, &sh->state); 1253 } 1254 } 1255 } 1256 /* now if nothing is locked, and if we have enough data, we can start a write request */ 1257 if (locked == 0 && (rcw == 0 ||rmw == 0) && 1258 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 1259 PRINTK("Computing parity...\n"); 1260 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE); 1261 /* now every locked buffer is ready to be written */ 1262 for (i=disks; i--;) 1263 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 1264 PRINTK("Writing block %d\n", i); 1265 locked++; 1266 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1267 if (!test_bit(R5_Insync, &sh->dev[i].flags) 1268 || (i==sh->pd_idx && failed == 0)) 1269 set_bit(STRIPE_INSYNC, &sh->state); 1270 } 1271 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 1272 atomic_dec(&conf->preread_active_stripes); 1273 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 1274 md_wakeup_thread(conf->mddev->thread); 1275 } 1276 } 1277 } 1278 1279 /* maybe we need to check and possibly fix the parity for this stripe 1280 * Any reads will already have been scheduled, so we just see if enough data 1281 * is available 1282 */ 1283 if (syncing && locked == 0 && 1284 !test_bit(STRIPE_INSYNC, &sh->state)) { 1285 set_bit(STRIPE_HANDLE, &sh->state); 1286 if (failed == 0) { 1287 char *pagea; 1288 if (uptodate != disks) 1289 BUG(); 1290 compute_parity(sh, CHECK_PARITY); 1291 uptodate--; 1292 pagea = page_address(sh->dev[sh->pd_idx].page); 1293 if ((*(u32*)pagea) == 0 && 1294 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { 1295 /* parity is correct (on disc, not in buffer any more) */ 1296 set_bit(STRIPE_INSYNC, &sh->state); 1297 } else { 1298 conf->mddev->resync_mismatches += STRIPE_SECTORS; 1299 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 1300 /* don't try to repair!! */ 1301 set_bit(STRIPE_INSYNC, &sh->state); 1302 else { 1303 compute_block(sh, sh->pd_idx); 1304 uptodate++; 1305 } 1306 } 1307 } 1308 if (!test_bit(STRIPE_INSYNC, &sh->state)) { 1309 /* either failed parity check, or recovery is happening */ 1310 if (failed==0) 1311 failed_num = sh->pd_idx; 1312 dev = &sh->dev[failed_num]; 1313 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 1314 BUG_ON(uptodate != disks); 1315 1316 set_bit(R5_LOCKED, &dev->flags); 1317 set_bit(R5_Wantwrite, &dev->flags); 1318 clear_bit(STRIPE_DEGRADED, &sh->state); 1319 locked++; 1320 set_bit(STRIPE_INSYNC, &sh->state); 1321 } 1322 } 1323 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1324 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 1325 clear_bit(STRIPE_SYNCING, &sh->state); 1326 } 1327 1328 /* If the failed drive is just a ReadError, then we might need to progress 1329 * the repair/check process 1330 */ 1331 if (failed == 1 && ! conf->mddev->ro && 1332 test_bit(R5_ReadError, &sh->dev[failed_num].flags) 1333 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags) 1334 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags) 1335 ) { 1336 dev = &sh->dev[failed_num]; 1337 if (!test_bit(R5_ReWrite, &dev->flags)) { 1338 set_bit(R5_Wantwrite, &dev->flags); 1339 set_bit(R5_ReWrite, &dev->flags); 1340 set_bit(R5_LOCKED, &dev->flags); 1341 } else { 1342 /* let's read it back */ 1343 set_bit(R5_Wantread, &dev->flags); 1344 set_bit(R5_LOCKED, &dev->flags); 1345 } 1346 } 1347 1348 spin_unlock(&sh->lock); 1349 1350 while ((bi=return_bi)) { 1351 int bytes = bi->bi_size; 1352 1353 return_bi = bi->bi_next; 1354 bi->bi_next = NULL; 1355 bi->bi_size = 0; 1356 bi->bi_end_io(bi, bytes, 0); 1357 } 1358 for (i=disks; i-- ;) { 1359 int rw; 1360 struct bio *bi; 1361 mdk_rdev_t *rdev; 1362 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 1363 rw = 1; 1364 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1365 rw = 0; 1366 else 1367 continue; 1368 1369 bi = &sh->dev[i].req; 1370 1371 bi->bi_rw = rw; 1372 if (rw) 1373 bi->bi_end_io = raid5_end_write_request; 1374 else 1375 bi->bi_end_io = raid5_end_read_request; 1376 1377 rcu_read_lock(); 1378 rdev = rcu_dereference(conf->disks[i].rdev); 1379 if (rdev && test_bit(Faulty, &rdev->flags)) 1380 rdev = NULL; 1381 if (rdev) 1382 atomic_inc(&rdev->nr_pending); 1383 rcu_read_unlock(); 1384 1385 if (rdev) { 1386 if (syncing) 1387 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1388 1389 bi->bi_bdev = rdev->bdev; 1390 PRINTK("for %llu schedule op %ld on disc %d\n", 1391 (unsigned long long)sh->sector, bi->bi_rw, i); 1392 atomic_inc(&sh->count); 1393 bi->bi_sector = sh->sector + rdev->data_offset; 1394 bi->bi_flags = 1 << BIO_UPTODATE; 1395 bi->bi_vcnt = 1; 1396 bi->bi_max_vecs = 1; 1397 bi->bi_idx = 0; 1398 bi->bi_io_vec = &sh->dev[i].vec; 1399 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1400 bi->bi_io_vec[0].bv_offset = 0; 1401 bi->bi_size = STRIPE_SIZE; 1402 bi->bi_next = NULL; 1403 generic_make_request(bi); 1404 } else { 1405 if (rw == 1) 1406 set_bit(STRIPE_DEGRADED, &sh->state); 1407 PRINTK("skip op %ld on disc %d for sector %llu\n", 1408 bi->bi_rw, i, (unsigned long long)sh->sector); 1409 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1410 set_bit(STRIPE_HANDLE, &sh->state); 1411 } 1412 } 1413} 1414 1415static inline void raid5_activate_delayed(raid5_conf_t *conf) 1416{ 1417 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1418 while (!list_empty(&conf->delayed_list)) { 1419 struct list_head *l = conf->delayed_list.next; 1420 struct stripe_head *sh; 1421 sh = list_entry(l, struct stripe_head, lru); 1422 list_del_init(l); 1423 clear_bit(STRIPE_DELAYED, &sh->state); 1424 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1425 atomic_inc(&conf->preread_active_stripes); 1426 list_add_tail(&sh->lru, &conf->handle_list); 1427 } 1428 } 1429} 1430 1431static inline void activate_bit_delay(raid5_conf_t *conf) 1432{ 1433 /* device_lock is held */ 1434 struct list_head head; 1435 list_add(&head, &conf->bitmap_list); 1436 list_del_init(&conf->bitmap_list); 1437 while (!list_empty(&head)) { 1438 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 1439 list_del_init(&sh->lru); 1440 atomic_inc(&sh->count); 1441 __release_stripe(conf, sh); 1442 } 1443} 1444 1445static void unplug_slaves(mddev_t *mddev) 1446{ 1447 raid5_conf_t *conf = mddev_to_conf(mddev); 1448 int i; 1449 1450 rcu_read_lock(); 1451 for (i=0; i<mddev->raid_disks; i++) { 1452 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 1453 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 1454 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 1455 1456 atomic_inc(&rdev->nr_pending); 1457 rcu_read_unlock(); 1458 1459 if (r_queue->unplug_fn) 1460 r_queue->unplug_fn(r_queue); 1461 1462 rdev_dec_pending(rdev, mddev); 1463 rcu_read_lock(); 1464 } 1465 } 1466 rcu_read_unlock(); 1467} 1468 1469static void raid5_unplug_device(request_queue_t *q) 1470{ 1471 mddev_t *mddev = q->queuedata; 1472 raid5_conf_t *conf = mddev_to_conf(mddev); 1473 unsigned long flags; 1474 1475 spin_lock_irqsave(&conf->device_lock, flags); 1476 1477 if (blk_remove_plug(q)) { 1478 conf->seq_flush++; 1479 raid5_activate_delayed(conf); 1480 } 1481 md_wakeup_thread(mddev->thread); 1482 1483 spin_unlock_irqrestore(&conf->device_lock, flags); 1484 1485 unplug_slaves(mddev); 1486} 1487 1488static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 1489 sector_t *error_sector) 1490{ 1491 mddev_t *mddev = q->queuedata; 1492 raid5_conf_t *conf = mddev_to_conf(mddev); 1493 int i, ret = 0; 1494 1495 rcu_read_lock(); 1496 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 1497 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 1498 if (rdev && !test_bit(Faulty, &rdev->flags)) { 1499 struct block_device *bdev = rdev->bdev; 1500 request_queue_t *r_queue = bdev_get_queue(bdev); 1501 1502 if (!r_queue->issue_flush_fn) 1503 ret = -EOPNOTSUPP; 1504 else { 1505 atomic_inc(&rdev->nr_pending); 1506 rcu_read_unlock(); 1507 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 1508 error_sector); 1509 rdev_dec_pending(rdev, mddev); 1510 rcu_read_lock(); 1511 } 1512 } 1513 } 1514 rcu_read_unlock(); 1515 return ret; 1516} 1517 1518static inline void raid5_plug_device(raid5_conf_t *conf) 1519{ 1520 spin_lock_irq(&conf->device_lock); 1521 blk_plug_device(conf->mddev->queue); 1522 spin_unlock_irq(&conf->device_lock); 1523} 1524 1525static int make_request (request_queue_t *q, struct bio * bi) 1526{ 1527 mddev_t *mddev = q->queuedata; 1528 raid5_conf_t *conf = mddev_to_conf(mddev); 1529 const unsigned int raid_disks = conf->raid_disks; 1530 const unsigned int data_disks = raid_disks - 1; 1531 unsigned int dd_idx, pd_idx; 1532 sector_t new_sector; 1533 sector_t logical_sector, last_sector; 1534 struct stripe_head *sh; 1535 const int rw = bio_data_dir(bi); 1536 1537 if (unlikely(bio_barrier(bi))) { 1538 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 1539 return 0; 1540 } 1541 1542 md_write_start(mddev, bi); 1543 1544 disk_stat_inc(mddev->gendisk, ios[rw]); 1545 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 1546 1547 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 1548 last_sector = bi->bi_sector + (bi->bi_size>>9); 1549 bi->bi_next = NULL; 1550 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 1551 1552 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 1553 DEFINE_WAIT(w); 1554 1555 new_sector = raid5_compute_sector(logical_sector, 1556 raid_disks, data_disks, &dd_idx, &pd_idx, conf); 1557 1558 PRINTK("raid5: make_request, sector %llu logical %llu\n", 1559 (unsigned long long)new_sector, 1560 (unsigned long long)logical_sector); 1561 1562 retry: 1563 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 1564 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK)); 1565 if (sh) { 1566 if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 1567 /* Add failed due to overlap. Flush everything 1568 * and wait a while 1569 */ 1570 raid5_unplug_device(mddev->queue); 1571 release_stripe(sh); 1572 schedule(); 1573 goto retry; 1574 } 1575 finish_wait(&conf->wait_for_overlap, &w); 1576 raid5_plug_device(conf); 1577 handle_stripe(sh); 1578 release_stripe(sh); 1579 1580 } else { 1581 /* cannot get stripe for read-ahead, just give-up */ 1582 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1583 finish_wait(&conf->wait_for_overlap, &w); 1584 break; 1585 } 1586 1587 } 1588 spin_lock_irq(&conf->device_lock); 1589 if (--bi->bi_phys_segments == 0) { 1590 int bytes = bi->bi_size; 1591 1592 if ( bio_data_dir(bi) == WRITE ) 1593 md_write_end(mddev); 1594 bi->bi_size = 0; 1595 bi->bi_end_io(bi, bytes, 0); 1596 } 1597 spin_unlock_irq(&conf->device_lock); 1598 return 0; 1599} 1600 1601/* FIXME go_faster isn't used */ 1602static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1603{ 1604 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1605 struct stripe_head *sh; 1606 int sectors_per_chunk = conf->chunk_size >> 9; 1607 sector_t x; 1608 unsigned long stripe; 1609 int chunk_offset; 1610 int dd_idx, pd_idx; 1611 sector_t first_sector; 1612 int raid_disks = conf->raid_disks; 1613 int data_disks = raid_disks-1; 1614 sector_t max_sector = mddev->size << 1; 1615 int sync_blocks; 1616 1617 if (sector_nr >= max_sector) { 1618 /* just being told to finish up .. nothing much to do */ 1619 unplug_slaves(mddev); 1620 1621 if (mddev->curr_resync < max_sector) /* aborted */ 1622 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 1623 &sync_blocks, 1); 1624 else /* compelted sync */ 1625 conf->fullsync = 0; 1626 bitmap_close_sync(mddev->bitmap); 1627 1628 return 0; 1629 } 1630 /* if there is 1 or more failed drives and we are trying 1631 * to resync, then assert that we are finished, because there is 1632 * nothing we can do. 1633 */ 1634 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 1635 sector_t rv = (mddev->size << 1) - sector_nr; 1636 *skipped = 1; 1637 return rv; 1638 } 1639 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1640 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 1641 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 1642 /* we can skip this block, and probably more */ 1643 sync_blocks /= STRIPE_SECTORS; 1644 *skipped = 1; 1645 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 1646 } 1647 1648 x = sector_nr; 1649 chunk_offset = sector_div(x, sectors_per_chunk); 1650 stripe = x; 1651 BUG_ON(x != stripe); 1652 1653 first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk 1654 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf); 1655 sh = get_active_stripe(conf, sector_nr, pd_idx, 1); 1656 if (sh == NULL) { 1657 sh = get_active_stripe(conf, sector_nr, pd_idx, 0); 1658 /* make sure we don't swamp the stripe cache if someone else 1659 * is trying to get access 1660 */ 1661 schedule_timeout_uninterruptible(1); 1662 } 1663 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0); 1664 spin_lock(&sh->lock); 1665 set_bit(STRIPE_SYNCING, &sh->state); 1666 clear_bit(STRIPE_INSYNC, &sh->state); 1667 spin_unlock(&sh->lock); 1668 1669 handle_stripe(sh); 1670 release_stripe(sh); 1671 1672 return STRIPE_SECTORS; 1673} 1674 1675/* 1676 * This is our raid5 kernel thread. 1677 * 1678 * We scan the hash table for stripes which can be handled now. 1679 * During the scan, completed stripes are saved for us by the interrupt 1680 * handler, so that they will not have to wait for our next wakeup. 1681 */ 1682static void raid5d (mddev_t *mddev) 1683{ 1684 struct stripe_head *sh; 1685 raid5_conf_t *conf = mddev_to_conf(mddev); 1686 int handled; 1687 1688 PRINTK("+++ raid5d active\n"); 1689 1690 md_check_recovery(mddev); 1691 1692 handled = 0; 1693 spin_lock_irq(&conf->device_lock); 1694 while (1) { 1695 struct list_head *first; 1696 1697 if (conf->seq_flush - conf->seq_write > 0) { 1698 int seq = conf->seq_flush; 1699 spin_unlock_irq(&conf->device_lock); 1700 bitmap_unplug(mddev->bitmap); 1701 spin_lock_irq(&conf->device_lock); 1702 conf->seq_write = seq; 1703 activate_bit_delay(conf); 1704 } 1705 1706 if (list_empty(&conf->handle_list) && 1707 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 1708 !blk_queue_plugged(mddev->queue) && 1709 !list_empty(&conf->delayed_list)) 1710 raid5_activate_delayed(conf); 1711 1712 if (list_empty(&conf->handle_list)) 1713 break; 1714 1715 first = conf->handle_list.next; 1716 sh = list_entry(first, struct stripe_head, lru); 1717 1718 list_del_init(first); 1719 atomic_inc(&sh->count); 1720 if (atomic_read(&sh->count)!= 1) 1721 BUG(); 1722 spin_unlock_irq(&conf->device_lock); 1723 1724 handled++; 1725 handle_stripe(sh); 1726 release_stripe(sh); 1727 1728 spin_lock_irq(&conf->device_lock); 1729 } 1730 PRINTK("%d stripes handled\n", handled); 1731 1732 spin_unlock_irq(&conf->device_lock); 1733 1734 unplug_slaves(mddev); 1735 1736 PRINTK("--- raid5d inactive\n"); 1737} 1738 1739static ssize_t 1740raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 1741{ 1742 raid5_conf_t *conf = mddev_to_conf(mddev); 1743 if (conf) 1744 return sprintf(page, "%d\n", conf->max_nr_stripes); 1745 else 1746 return 0; 1747} 1748 1749static ssize_t 1750raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 1751{ 1752 raid5_conf_t *conf = mddev_to_conf(mddev); 1753 char *end; 1754 int new; 1755 if (len >= PAGE_SIZE) 1756 return -EINVAL; 1757 if (!conf) 1758 return -ENODEV; 1759 1760 new = simple_strtoul(page, &end, 10); 1761 if (!*page || (*end && *end != '\n') ) 1762 return -EINVAL; 1763 if (new <= 16 || new > 32768) 1764 return -EINVAL; 1765 while (new < conf->max_nr_stripes) { 1766 if (drop_one_stripe(conf)) 1767 conf->max_nr_stripes--; 1768 else 1769 break; 1770 } 1771 while (new > conf->max_nr_stripes) { 1772 if (grow_one_stripe(conf)) 1773 conf->max_nr_stripes++; 1774 else break; 1775 } 1776 return len; 1777} 1778 1779static struct md_sysfs_entry 1780raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 1781 raid5_show_stripe_cache_size, 1782 raid5_store_stripe_cache_size); 1783 1784static ssize_t 1785stripe_cache_active_show(mddev_t *mddev, char *page) 1786{ 1787 raid5_conf_t *conf = mddev_to_conf(mddev); 1788 if (conf) 1789 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 1790 else 1791 return 0; 1792} 1793 1794static struct md_sysfs_entry 1795raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 1796 1797static struct attribute *raid5_attrs[] = { 1798 &raid5_stripecache_size.attr, 1799 &raid5_stripecache_active.attr, 1800 NULL, 1801}; 1802static struct attribute_group raid5_attrs_group = { 1803 .name = NULL, 1804 .attrs = raid5_attrs, 1805}; 1806 1807static int run(mddev_t *mddev) 1808{ 1809 raid5_conf_t *conf; 1810 int raid_disk, memory; 1811 mdk_rdev_t *rdev; 1812 struct disk_info *disk; 1813 struct list_head *tmp; 1814 1815 if (mddev->level != 5 && mddev->level != 4) { 1816 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n", 1817 mdname(mddev), mddev->level); 1818 return -EIO; 1819 } 1820 1821 mddev->private = kzalloc(sizeof (raid5_conf_t) 1822 + mddev->raid_disks * sizeof(struct disk_info), 1823 GFP_KERNEL); 1824 if ((conf = mddev->private) == NULL) 1825 goto abort; 1826 1827 conf->mddev = mddev; 1828 1829 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 1830 goto abort; 1831 1832 spin_lock_init(&conf->device_lock); 1833 init_waitqueue_head(&conf->wait_for_stripe); 1834 init_waitqueue_head(&conf->wait_for_overlap); 1835 INIT_LIST_HEAD(&conf->handle_list); 1836 INIT_LIST_HEAD(&conf->delayed_list); 1837 INIT_LIST_HEAD(&conf->bitmap_list); 1838 INIT_LIST_HEAD(&conf->inactive_list); 1839 atomic_set(&conf->active_stripes, 0); 1840 atomic_set(&conf->preread_active_stripes, 0); 1841 1842 PRINTK("raid5: run(%s) called.\n", mdname(mddev)); 1843 1844 ITERATE_RDEV(mddev,rdev,tmp) { 1845 raid_disk = rdev->raid_disk; 1846 if (raid_disk >= mddev->raid_disks 1847 || raid_disk < 0) 1848 continue; 1849 disk = conf->disks + raid_disk; 1850 1851 disk->rdev = rdev; 1852 1853 if (test_bit(In_sync, &rdev->flags)) { 1854 char b[BDEVNAME_SIZE]; 1855 printk(KERN_INFO "raid5: device %s operational as raid" 1856 " disk %d\n", bdevname(rdev->bdev,b), 1857 raid_disk); 1858 conf->working_disks++; 1859 } 1860 } 1861 1862 conf->raid_disks = mddev->raid_disks; 1863 /* 1864 * 0 for a fully functional array, 1 for a degraded array. 1865 */ 1866 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; 1867 conf->mddev = mddev; 1868 conf->chunk_size = mddev->chunk_size; 1869 conf->level = mddev->level; 1870 conf->algorithm = mddev->layout; 1871 conf->max_nr_stripes = NR_STRIPES; 1872 1873 /* device size must be a multiple of chunk size */ 1874 mddev->size &= ~(mddev->chunk_size/1024 -1); 1875 mddev->resync_max_sectors = mddev->size << 1; 1876 1877 if (!conf->chunk_size || conf->chunk_size % 4) { 1878 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 1879 conf->chunk_size, mdname(mddev)); 1880 goto abort; 1881 } 1882 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 1883 printk(KERN_ERR 1884 "raid5: unsupported parity algorithm %d for %s\n", 1885 conf->algorithm, mdname(mddev)); 1886 goto abort; 1887 } 1888 if (mddev->degraded > 1) { 1889 printk(KERN_ERR "raid5: not enough operational devices for %s" 1890 " (%d/%d failed)\n", 1891 mdname(mddev), conf->failed_disks, conf->raid_disks); 1892 goto abort; 1893 } 1894 1895 if (mddev->degraded == 1 && 1896 mddev->recovery_cp != MaxSector) { 1897 if (mddev->ok_start_degraded) 1898 printk(KERN_WARNING 1899 "raid5: starting dirty degraded array: %s" 1900 "- data corruption possible.\n", 1901 mdname(mddev)); 1902 else { 1903 printk(KERN_ERR 1904 "raid5: cannot start dirty degraded array for %s\n", 1905 mdname(mddev)); 1906 goto abort; 1907 } 1908 } 1909 1910 { 1911 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 1912 if (!mddev->thread) { 1913 printk(KERN_ERR 1914 "raid5: couldn't allocate thread for %s\n", 1915 mdname(mddev)); 1916 goto abort; 1917 } 1918 } 1919 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 1920 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 1921 if (grow_stripes(conf, conf->max_nr_stripes)) { 1922 printk(KERN_ERR 1923 "raid5: couldn't allocate %dkB for buffers\n", memory); 1924 shrink_stripes(conf); 1925 md_unregister_thread(mddev->thread); 1926 goto abort; 1927 } else 1928 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 1929 memory, mdname(mddev)); 1930 1931 if (mddev->degraded == 0) 1932 printk("raid5: raid level %d set %s active with %d out of %d" 1933 " devices, algorithm %d\n", conf->level, mdname(mddev), 1934 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 1935 conf->algorithm); 1936 else 1937 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 1938 " out of %d devices, algorithm %d\n", conf->level, 1939 mdname(mddev), mddev->raid_disks - mddev->degraded, 1940 mddev->raid_disks, conf->algorithm); 1941 1942 print_raid5_conf(conf); 1943 1944 /* read-ahead size must cover two whole stripes, which is 1945 * 2 * (n-1) * chunksize where 'n' is the number of raid devices 1946 */ 1947 { 1948 int stripe = (mddev->raid_disks-1) * mddev->chunk_size 1949 / PAGE_SIZE; 1950 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 1951 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 1952 } 1953 1954 /* Ok, everything is just fine now */ 1955 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 1956 1957 mddev->queue->unplug_fn = raid5_unplug_device; 1958 mddev->queue->issue_flush_fn = raid5_issue_flush; 1959 1960 mddev->array_size = mddev->size * (mddev->raid_disks - 1); 1961 return 0; 1962abort: 1963 if (conf) { 1964 print_raid5_conf(conf); 1965 kfree(conf->stripe_hashtbl); 1966 kfree(conf); 1967 } 1968 mddev->private = NULL; 1969 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 1970 return -EIO; 1971} 1972 1973 1974 1975static int stop(mddev_t *mddev) 1976{ 1977 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1978 1979 md_unregister_thread(mddev->thread); 1980 mddev->thread = NULL; 1981 shrink_stripes(conf); 1982 kfree(conf->stripe_hashtbl); 1983 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 1984 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 1985 kfree(conf); 1986 mddev->private = NULL; 1987 return 0; 1988} 1989 1990#if RAID5_DEBUG 1991static void print_sh (struct stripe_head *sh) 1992{ 1993 int i; 1994 1995 printk("sh %llu, pd_idx %d, state %ld.\n", 1996 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 1997 printk("sh %llu, count %d.\n", 1998 (unsigned long long)sh->sector, atomic_read(&sh->count)); 1999 printk("sh %llu, ", (unsigned long long)sh->sector); 2000 for (i = 0; i < sh->raid_conf->raid_disks; i++) { 2001 printk("(cache%d: %p %ld) ", 2002 i, sh->dev[i].page, sh->dev[i].flags); 2003 } 2004 printk("\n"); 2005} 2006 2007static void printall (raid5_conf_t *conf) 2008{ 2009 struct stripe_head *sh; 2010 struct hlist_node *hn; 2011 int i; 2012 2013 spin_lock_irq(&conf->device_lock); 2014 for (i = 0; i < NR_HASH; i++) { 2015 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 2016 if (sh->raid_conf != conf) 2017 continue; 2018 print_sh(sh); 2019 } 2020 } 2021 spin_unlock_irq(&conf->device_lock); 2022} 2023#endif 2024 2025static void status (struct seq_file *seq, mddev_t *mddev) 2026{ 2027 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 2028 int i; 2029 2030 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 2031 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks); 2032 for (i = 0; i < conf->raid_disks; i++) 2033 seq_printf (seq, "%s", 2034 conf->disks[i].rdev && 2035 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 2036 seq_printf (seq, "]"); 2037#if RAID5_DEBUG 2038#define D(x) \ 2039 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x)) 2040 printall(conf); 2041#endif 2042} 2043 2044static void print_raid5_conf (raid5_conf_t *conf) 2045{ 2046 int i; 2047 struct disk_info *tmp; 2048 2049 printk("RAID5 conf printout:\n"); 2050 if (!conf) { 2051 printk("(conf==NULL)\n"); 2052 return; 2053 } 2054 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks, 2055 conf->working_disks, conf->failed_disks); 2056 2057 for (i = 0; i < conf->raid_disks; i++) { 2058 char b[BDEVNAME_SIZE]; 2059 tmp = conf->disks + i; 2060 if (tmp->rdev) 2061 printk(" disk %d, o:%d, dev:%s\n", 2062 i, !test_bit(Faulty, &tmp->rdev->flags), 2063 bdevname(tmp->rdev->bdev,b)); 2064 } 2065} 2066 2067static int raid5_spare_active(mddev_t *mddev) 2068{ 2069 int i; 2070 raid5_conf_t *conf = mddev->private; 2071 struct disk_info *tmp; 2072 2073 for (i = 0; i < conf->raid_disks; i++) { 2074 tmp = conf->disks + i; 2075 if (tmp->rdev 2076 && !test_bit(Faulty, &tmp->rdev->flags) 2077 && !test_bit(In_sync, &tmp->rdev->flags)) { 2078 mddev->degraded--; 2079 conf->failed_disks--; 2080 conf->working_disks++; 2081 set_bit(In_sync, &tmp->rdev->flags); 2082 } 2083 } 2084 print_raid5_conf(conf); 2085 return 0; 2086} 2087 2088static int raid5_remove_disk(mddev_t *mddev, int number) 2089{ 2090 raid5_conf_t *conf = mddev->private; 2091 int err = 0; 2092 mdk_rdev_t *rdev; 2093 struct disk_info *p = conf->disks + number; 2094 2095 print_raid5_conf(conf); 2096 rdev = p->rdev; 2097 if (rdev) { 2098 if (test_bit(In_sync, &rdev->flags) || 2099 atomic_read(&rdev->nr_pending)) { 2100 err = -EBUSY; 2101 goto abort; 2102 } 2103 p->rdev = NULL; 2104 synchronize_rcu(); 2105 if (atomic_read(&rdev->nr_pending)) { 2106 /* lost the race, try later */ 2107 err = -EBUSY; 2108 p->rdev = rdev; 2109 } 2110 } 2111abort: 2112 2113 print_raid5_conf(conf); 2114 return err; 2115} 2116 2117static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 2118{ 2119 raid5_conf_t *conf = mddev->private; 2120 int found = 0; 2121 int disk; 2122 struct disk_info *p; 2123 2124 if (mddev->degraded > 1) 2125 /* no point adding a device */ 2126 return 0; 2127 2128 /* 2129 * find the disk ... 2130 */ 2131 for (disk=0; disk < mddev->raid_disks; disk++) 2132 if ((p=conf->disks + disk)->rdev == NULL) { 2133 clear_bit(In_sync, &rdev->flags); 2134 rdev->raid_disk = disk; 2135 found = 1; 2136 if (rdev->saved_raid_disk != disk) 2137 conf->fullsync = 1; 2138 rcu_assign_pointer(p->rdev, rdev); 2139 break; 2140 } 2141 print_raid5_conf(conf); 2142 return found; 2143} 2144 2145static int raid5_resize(mddev_t *mddev, sector_t sectors) 2146{ 2147 /* no resync is happening, and there is enough space 2148 * on all devices, so we can resize. 2149 * We need to make sure resync covers any new space. 2150 * If the array is shrinking we should possibly wait until 2151 * any io in the removed space completes, but it hardly seems 2152 * worth it. 2153 */ 2154 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 2155 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1; 2156 set_capacity(mddev->gendisk, mddev->array_size << 1); 2157 mddev->changed = 1; 2158 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 2159 mddev->recovery_cp = mddev->size << 1; 2160 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2161 } 2162 mddev->size = sectors /2; 2163 mddev->resync_max_sectors = sectors; 2164 return 0; 2165} 2166 2167static void raid5_quiesce(mddev_t *mddev, int state) 2168{ 2169 raid5_conf_t *conf = mddev_to_conf(mddev); 2170 2171 switch(state) { 2172 case 1: /* stop all writes */ 2173 spin_lock_irq(&conf->device_lock); 2174 conf->quiesce = 1; 2175 wait_event_lock_irq(conf->wait_for_stripe, 2176 atomic_read(&conf->active_stripes) == 0, 2177 conf->device_lock, /* nothing */); 2178 spin_unlock_irq(&conf->device_lock); 2179 break; 2180 2181 case 0: /* re-enable writes */ 2182 spin_lock_irq(&conf->device_lock); 2183 conf->quiesce = 0; 2184 wake_up(&conf->wait_for_stripe); 2185 spin_unlock_irq(&conf->device_lock); 2186 break; 2187 } 2188} 2189 2190static struct mdk_personality raid5_personality = 2191{ 2192 .name = "raid5", 2193 .level = 5, 2194 .owner = THIS_MODULE, 2195 .make_request = make_request, 2196 .run = run, 2197 .stop = stop, 2198 .status = status, 2199 .error_handler = error, 2200 .hot_add_disk = raid5_add_disk, 2201 .hot_remove_disk= raid5_remove_disk, 2202 .spare_active = raid5_spare_active, 2203 .sync_request = sync_request, 2204 .resize = raid5_resize, 2205 .quiesce = raid5_quiesce, 2206}; 2207 2208static struct mdk_personality raid4_personality = 2209{ 2210 .name = "raid4", 2211 .level = 4, 2212 .owner = THIS_MODULE, 2213 .make_request = make_request, 2214 .run = run, 2215 .stop = stop, 2216 .status = status, 2217 .error_handler = error, 2218 .hot_add_disk = raid5_add_disk, 2219 .hot_remove_disk= raid5_remove_disk, 2220 .spare_active = raid5_spare_active, 2221 .sync_request = sync_request, 2222 .resize = raid5_resize, 2223 .quiesce = raid5_quiesce, 2224}; 2225 2226static int __init raid5_init(void) 2227{ 2228 register_md_personality(&raid5_personality); 2229 register_md_personality(&raid4_personality); 2230 return 0; 2231} 2232 2233static void raid5_exit(void) 2234{ 2235 unregister_md_personality(&raid5_personality); 2236 unregister_md_personality(&raid4_personality); 2237} 2238 2239module_init(raid5_init); 2240module_exit(raid5_exit); 2241MODULE_LICENSE("GPL"); 2242MODULE_ALIAS("md-personality-4"); /* RAID5 */ 2243MODULE_ALIAS("md-raid5"); 2244MODULE_ALIAS("md-raid4"); 2245MODULE_ALIAS("md-level-5"); 2246MODULE_ALIAS("md-level-4"); 2247