journal.c revision 1b207d80d5b986fb305bc899357435d319319513
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %llu", (uint64_t) bucket);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_sector = bucket + offset;
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = READ;
		bio->bi_size = len << 9;

		bio->bi_end_io = journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

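		/*
		 * Walk the jsets we just read out of this bucket: validate
		 * each entry's magic, size and checksum, drop entries at the
		 * head of the replay list that the new entry's last_seq says
		 * are no longer needed, then splice the new entry into the
		 * replay list in sequence-number order (skipping duplicates).
		 * Stop at the first entry that doesn't validate.
		 */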
		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(ca->set))
				return ret;

			if (bytes > left << 9)
				return ret;

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset += blocks * ca->sb.block_size;
			len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		int ret = journal_read_bucket(ca, list, b);	\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

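/*
 * Mark everything the journal references before replay: set up a journal pin
 * for each replay entry, bump the pin count on every bucket a journalled key
 * points to, and mark the keys the same way garbage collection would, so
 * those buckets aren't reused before the keys have been reinserted into the
 * btree.
 */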
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;
	struct btree_op op;

	bch_keylist_init(&keylist);
	bch_btree_op_init(&op, SHRT_MAX);

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

			ret = bch_btree_insert(&op, s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

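/*
 * Write out the btree node whose current write references the oldest journal
 * entry, so that entry can be dropped and its journal bucket reclaimed.
 * Called when the journal is full.
 */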
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_sector = bucket_to_sector(ca->set,
						  ca->sb.d[ja->discard_idx]);
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs = 1;
		bio->bi_io_vec = bio->bi_inline_vecs;
		bio->bi_size = bucket_bytes(ca);
		bio->bi_end_io = journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

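/*
 * Free up journal buckets whose entries have all been written into the btree:
 * pop journal pins that have dropped to zero, advance each device's last_idx
 * past buckets older than last_seq, issue discards for the freed buckets, and
 * once the current bucket is used up pick the next bucket to write to on each
 * cache device.
 */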
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq = ++j->seq;
	j->cur->need_write = false;
	j->cur->data->keys = 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

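/*
 * Writes out the current journal entry. Called with c->journal.lock held and
 * releases it: if there's nothing to write the io closure is simply dropped,
 * if the journal is full we reclaim space and try again, and otherwise the
 * jset header (btree root, uuid bucket, prio buckets, sequence numbers,
 * checksum) is filled in and one bio is submitted per pointer in
 * c->journal.key.
 */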
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic = jset_magic(c);
	w->data->version = BCACHE_JSET_VERSION;
	w->data->last_seq = last_seq(&c->journal);
	w->data->csum = csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_sector = PTR_OFFSET(k, i);
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_size = sectors << 9;

		bio->bi_end_io = journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (closure_trylock(cl, &c->cl))
		journal_write_unlocked(cl);
	else
		spin_unlock(&c->journal.lock);
}

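/*
 * Returns the current journal_write once it has room for nkeys more keys,
 * with c->journal.lock held. If the current entry is too full we either kick
 * off a write of it (journal not yet full) or reclaim buckets and flush the
 * btree node pinning the oldest entry (journal full), then wait on the
 * closure and retry.
 */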
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       c) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		/* XXX: tracepoint */
		if (!journal_full(&c->journal)) {
			trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			closure_wait(&w->wait, &cl);
			journal_try_write(c); /* unlocks */
		} else {
			trace_bcache_journal_full(c);

			closure_wait(&c->journal.wait, &cl);
			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	journal_try_write(c);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->need_write) {
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}