journal.c revision a34a8bfd4e6358c646928320d37b0425c0762f8a
1/* 2 * bcache journalling code, for btree insertions 3 * 4 * Copyright 2012 Google, Inc. 5 */ 6 7#include "bcache.h" 8#include "btree.h" 9#include "debug.h" 10#include "request.h" 11 12#include <trace/events/bcache.h> 13 14/* 15 * Journal replay/recovery: 16 * 17 * This code is all driven from run_cache_set(); we first read the journal 18 * entries, do some other stuff, then we mark all the keys in the journal 19 * entries (same as garbage collection would), then we replay them - reinserting 20 * them into the cache in precisely the same order as they appear in the 21 * journal. 22 * 23 * We only journal keys that go in leaf nodes, which simplifies things quite a 24 * bit. 25 */ 26 27static void journal_read_endio(struct bio *bio, int error) 28{ 29 struct closure *cl = bio->bi_private; 30 closure_put(cl); 31} 32 33static int journal_read_bucket(struct cache *ca, struct list_head *list, 34 struct btree_op *op, unsigned bucket_index) 35{ 36 struct journal_device *ja = &ca->journal; 37 struct bio *bio = &ja->bio; 38 39 struct journal_replay *i; 40 struct jset *j, *data = ca->set->journal.w[0].data; 41 unsigned len, left, offset = 0; 42 int ret = 0; 43 sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); 44 45 pr_debug("reading %llu", (uint64_t) bucket); 46 47 while (offset < ca->sb.bucket_size) { 48reread: left = ca->sb.bucket_size - offset; 49 len = min_t(unsigned, left, PAGE_SECTORS * 8); 50 51 bio_reset(bio); 52 bio->bi_sector = bucket + offset; 53 bio->bi_bdev = ca->bdev; 54 bio->bi_rw = READ; 55 bio->bi_size = len << 9; 56 57 bio->bi_end_io = journal_read_endio; 58 bio->bi_private = &op->cl; 59 bch_bio_map(bio, data); 60 61 closure_bio_submit(bio, &op->cl, ca); 62 closure_sync(&op->cl); 63 64 /* This function could be simpler now since we no longer write 65 * journal entries that overlap bucket boundaries; this means 66 * the start of a bucket will always have a valid journal entry 67 * if it has any journal entries at all. 68 */ 69 70 j = data; 71 while (len) { 72 struct list_head *where; 73 size_t blocks, bytes = set_bytes(j); 74 75 if (j->magic != jset_magic(ca->set)) 76 return ret; 77 78 if (bytes > left << 9) 79 return ret; 80 81 if (bytes > len << 9) 82 goto reread; 83 84 if (j->csum != csum_set(j)) 85 return ret; 86 87 blocks = set_blocks(j, ca->set); 88 89 while (!list_empty(list)) { 90 i = list_first_entry(list, 91 struct journal_replay, list); 92 if (i->j.seq >= j->last_seq) 93 break; 94 list_del(&i->list); 95 kfree(i); 96 } 97 98 list_for_each_entry_reverse(i, list, list) { 99 if (j->seq == i->j.seq) 100 goto next_set; 101 102 if (j->seq < i->j.last_seq) 103 goto next_set; 104 105 if (j->seq > i->j.seq) { 106 where = &i->list; 107 goto add; 108 } 109 } 110 111 where = list; 112add: 113 i = kmalloc(offsetof(struct journal_replay, j) + 114 bytes, GFP_KERNEL); 115 if (!i) 116 return -ENOMEM; 117 memcpy(&i->j, j, bytes); 118 list_add(&i->list, where); 119 ret = 1; 120 121 ja->seq[bucket_index] = j->seq; 122next_set: 123 offset += blocks * ca->sb.block_size; 124 len -= blocks * ca->sb.block_size; 125 j = ((void *) j) + blocks * block_bytes(ca); 126 } 127 } 128 129 return ret; 130} 131 132int bch_journal_read(struct cache_set *c, struct list_head *list, 133 struct btree_op *op) 134{ 135#define read_bucket(b) \ 136 ({ \ 137 int ret = journal_read_bucket(ca, list, op, b); \ 138 __set_bit(b, bitmap); \ 139 if (ret < 0) \ 140 return ret; \ 141 ret; \ 142 }) 143 144 struct cache *ca; 145 unsigned iter; 146 147 for_each_cache(ca, c, iter) { 148 struct journal_device *ja = &ca->journal; 149 unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG]; 150 unsigned i, l, r, m; 151 uint64_t seq; 152 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 155 156 /* 157 * Read journal buckets ordered by golden ratio hash to quickly 158 * find a sequence of buckets with valid journal entries 159 */ 160 for (i = 0; i < ca->sb.njournal_buckets; i++) { 161 l = (i * 2654435769U) % ca->sb.njournal_buckets; 162 163 if (test_bit(l, bitmap)) 164 break; 165 166 if (read_bucket(l)) 167 goto bsearch; 168 } 169 170 /* 171 * If that fails, check all the buckets we haven't checked 172 * already 173 */ 174 pr_debug("falling back to linear search"); 175 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); 177 l < ca->sb.njournal_buckets; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) 179 if (read_bucket(l)) 180 goto bsearch; 181 182 if (list_empty(list)) 183 continue; 184bsearch: 185 /* Binary search */ 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 187 pr_debug("starting binary search, l %u r %u", l, r); 188 189 while (l + 1 < r) { 190 seq = list_entry(list->prev, struct journal_replay, 191 list)->j.seq; 192 193 m = (l + r) >> 1; 194 read_bucket(m); 195 196 if (seq != list_entry(list->prev, struct journal_replay, 197 list)->j.seq) 198 l = m; 199 else 200 r = m; 201 } 202 203 /* 204 * Read buckets in reverse order until we stop finding more 205 * journal entries 206 */ 207 pr_debug("finishing up: m %u njournal_buckets %u", 208 m, ca->sb.njournal_buckets); 209 l = m; 210 211 while (1) { 212 if (!l--) 213 l = ca->sb.njournal_buckets - 1; 214 215 if (l == m) 216 break; 217 218 if (test_bit(l, bitmap)) 219 continue; 220 221 if (!read_bucket(l)) 222 break; 223 } 224 225 seq = 0; 226 227 for (i = 0; i < ca->sb.njournal_buckets; i++) 228 if (ja->seq[i] > seq) { 229 seq = ja->seq[i]; 230 ja->cur_idx = ja->discard_idx = 231 ja->last_idx = i; 232 233 } 234 } 235 236 if (!list_empty(list)) 237 c->journal.seq = list_entry(list->prev, 238 struct journal_replay, 239 list)->j.seq; 240 241 return 0; 242#undef read_bucket 243} 244 245void bch_journal_mark(struct cache_set *c, struct list_head *list) 246{ 247 atomic_t p = { 0 }; 248 struct bkey *k; 249 struct journal_replay *i; 250 struct journal *j = &c->journal; 251 uint64_t last = j->seq; 252 253 /* 254 * journal.pin should never fill up - we never write a journal 255 * entry when it would fill up. But if for some reason it does, we 256 * iterate over the list in reverse order so that we can just skip that 257 * refcount instead of bugging. 258 */ 259 260 list_for_each_entry_reverse(i, list, list) { 261 BUG_ON(last < i->j.seq); 262 i->pin = NULL; 263 264 while (last-- != i->j.seq) 265 if (fifo_free(&j->pin) > 1) { 266 fifo_push_front(&j->pin, p); 267 atomic_set(&fifo_front(&j->pin), 0); 268 } 269 270 if (fifo_free(&j->pin) > 1) { 271 fifo_push_front(&j->pin, p); 272 i->pin = &fifo_front(&j->pin); 273 atomic_set(i->pin, 1); 274 } 275 276 for (k = i->j.start; 277 k < end(&i->j); 278 k = bkey_next(k)) { 279 unsigned j; 280 281 for (j = 0; j < KEY_PTRS(k); j++) { 282 struct bucket *g = PTR_BUCKET(c, k, j); 283 atomic_inc(&g->pin); 284 285 if (g->prio == BTREE_PRIO && 286 !ptr_stale(c, k, j)) 287 g->prio = INITIAL_PRIO; 288 } 289 290 __bch_btree_mark_key(c, 0, k); 291 } 292 } 293} 294 295int bch_journal_replay(struct cache_set *s, struct list_head *list, 296 struct btree_op *op) 297{ 298 int ret = 0, keys = 0, entries = 0; 299 struct bkey *k; 300 struct journal_replay *i = 301 list_entry(list->prev, struct journal_replay, list); 302 303 uint64_t start = i->j.last_seq, end = i->j.seq, n = start; 304 305 list_for_each_entry(i, list, list) { 306 BUG_ON(i->pin && atomic_read(i->pin) != 1); 307 308 cache_set_err_on(n != i->j.seq, s, 309"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", 310 n, i->j.seq - 1, start, end); 311 312 for (k = i->j.start; 313 k < end(&i->j); 314 k = bkey_next(k)) { 315 trace_bcache_journal_replay_key(k); 316 317 bkey_copy(op->keys.top, k); 318 bch_keylist_push(&op->keys); 319 320 op->journal = i->pin; 321 322 ret = bch_btree_insert(op, s, &op->keys); 323 if (ret) 324 goto err; 325 326 BUG_ON(!bch_keylist_empty(&op->keys)); 327 keys++; 328 329 cond_resched(); 330 } 331 332 if (i->pin) 333 atomic_dec(i->pin); 334 n = i->j.seq + 1; 335 entries++; 336 } 337 338 pr_info("journal replay done, %i keys in %i entries, seq %llu", 339 keys, entries, end); 340 341 while (!list_empty(list)) { 342 i = list_first_entry(list, struct journal_replay, list); 343 list_del(&i->list); 344 kfree(i); 345 } 346err: 347 closure_sync(&op->cl); 348 return ret; 349} 350 351/* Journalling */ 352 353static void btree_flush_write(struct cache_set *c) 354{ 355 /* 356 * Try to find the btree node with that references the oldest journal 357 * entry, best is our current candidate and is locked if non NULL: 358 */ 359 struct btree *b, *best; 360 unsigned i; 361retry: 362 best = NULL; 363 364 for_each_cached_btree(b, c, i) 365 if (btree_current_write(b)->journal) { 366 if (!best) 367 best = b; 368 else if (journal_pin_cmp(c, 369 btree_current_write(best), 370 btree_current_write(b))) { 371 best = b; 372 } 373 } 374 375 b = best; 376 if (b) { 377 rw_lock(true, b, b->level); 378 379 if (!btree_current_write(b)->journal) { 380 rw_unlock(true, b); 381 /* We raced */ 382 goto retry; 383 } 384 385 bch_btree_node_write(b, NULL); 386 rw_unlock(true, b); 387 } 388} 389 390#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1) 391 392static void journal_discard_endio(struct bio *bio, int error) 393{ 394 struct journal_device *ja = 395 container_of(bio, struct journal_device, discard_bio); 396 struct cache *ca = container_of(ja, struct cache, journal); 397 398 atomic_set(&ja->discard_in_flight, DISCARD_DONE); 399 400 closure_wake_up(&ca->set->journal.wait); 401 closure_put(&ca->set->cl); 402} 403 404static void journal_discard_work(struct work_struct *work) 405{ 406 struct journal_device *ja = 407 container_of(work, struct journal_device, discard_work); 408 409 submit_bio(0, &ja->discard_bio); 410} 411 412static void do_journal_discard(struct cache *ca) 413{ 414 struct journal_device *ja = &ca->journal; 415 struct bio *bio = &ja->discard_bio; 416 417 if (!ca->discard) { 418 ja->discard_idx = ja->last_idx; 419 return; 420 } 421 422 switch (atomic_read(&ja->discard_in_flight)) { 423 case DISCARD_IN_FLIGHT: 424 return; 425 426 case DISCARD_DONE: 427 ja->discard_idx = (ja->discard_idx + 1) % 428 ca->sb.njournal_buckets; 429 430 atomic_set(&ja->discard_in_flight, DISCARD_READY); 431 /* fallthrough */ 432 433 case DISCARD_READY: 434 if (ja->discard_idx == ja->last_idx) 435 return; 436 437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); 438 439 bio_init(bio); 440 bio->bi_sector = bucket_to_sector(ca->set, 441 ca->sb.d[ja->discard_idx]); 442 bio->bi_bdev = ca->bdev; 443 bio->bi_rw = REQ_WRITE|REQ_DISCARD; 444 bio->bi_max_vecs = 1; 445 bio->bi_io_vec = bio->bi_inline_vecs; 446 bio->bi_size = bucket_bytes(ca); 447 bio->bi_end_io = journal_discard_endio; 448 449 closure_get(&ca->set->cl); 450 INIT_WORK(&ja->discard_work, journal_discard_work); 451 schedule_work(&ja->discard_work); 452 } 453} 454 455static void journal_reclaim(struct cache_set *c) 456{ 457 struct bkey *k = &c->journal.key; 458 struct cache *ca; 459 uint64_t last_seq; 460 unsigned iter, n = 0; 461 atomic_t p; 462 463 while (!atomic_read(&fifo_front(&c->journal.pin))) 464 fifo_pop(&c->journal.pin, p); 465 466 last_seq = last_seq(&c->journal); 467 468 /* Update last_idx */ 469 470 for_each_cache(ca, c, iter) { 471 struct journal_device *ja = &ca->journal; 472 473 while (ja->last_idx != ja->cur_idx && 474 ja->seq[ja->last_idx] < last_seq) 475 ja->last_idx = (ja->last_idx + 1) % 476 ca->sb.njournal_buckets; 477 } 478 479 for_each_cache(ca, c, iter) 480 do_journal_discard(ca); 481 482 if (c->journal.blocks_free) 483 goto out; 484 485 /* 486 * Allocate: 487 * XXX: Sort by free journal space 488 */ 489 490 for_each_cache(ca, c, iter) { 491 struct journal_device *ja = &ca->journal; 492 unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; 493 494 /* No space available on this device */ 495 if (next == ja->discard_idx) 496 continue; 497 498 ja->cur_idx = next; 499 k->ptr[n++] = PTR(0, 500 bucket_to_sector(c, ca->sb.d[ja->cur_idx]), 501 ca->sb.nr_this_dev); 502 } 503 504 bkey_init(k); 505 SET_KEY_PTRS(k, n); 506 507 if (n) 508 c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; 509out: 510 if (!journal_full(&c->journal)) 511 __closure_wake_up(&c->journal.wait); 512} 513 514void bch_journal_next(struct journal *j) 515{ 516 atomic_t p = { 1 }; 517 518 j->cur = (j->cur == j->w) 519 ? &j->w[1] 520 : &j->w[0]; 521 522 /* 523 * The fifo_push() needs to happen at the same time as j->seq is 524 * incremented for last_seq() to be calculated correctly 525 */ 526 BUG_ON(!fifo_push(&j->pin, p)); 527 atomic_set(&fifo_back(&j->pin), 1); 528 529 j->cur->data->seq = ++j->seq; 530 j->cur->need_write = false; 531 j->cur->data->keys = 0; 532 533 if (fifo_full(&j->pin)) 534 pr_debug("journal_pin full (%zu)", fifo_used(&j->pin)); 535} 536 537static void journal_write_endio(struct bio *bio, int error) 538{ 539 struct journal_write *w = bio->bi_private; 540 541 cache_set_err_on(error, w->c, "journal io error"); 542 closure_put(&w->c->journal.io); 543} 544 545static void journal_write(struct closure *); 546 547static void journal_write_done(struct closure *cl) 548{ 549 struct journal *j = container_of(cl, struct journal, io); 550 struct journal_write *w = (j->cur == j->w) 551 ? &j->w[1] 552 : &j->w[0]; 553 554 __closure_wake_up(&w->wait); 555 continue_at_nobarrier(cl, journal_write, system_wq); 556} 557 558static void journal_write_unlocked(struct closure *cl) 559 __releases(c->journal.lock) 560{ 561 struct cache_set *c = container_of(cl, struct cache_set, journal.io); 562 struct cache *ca; 563 struct journal_write *w = c->journal.cur; 564 struct bkey *k = &c->journal.key; 565 unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size; 566 567 struct bio *bio; 568 struct bio_list list; 569 bio_list_init(&list); 570 571 if (!w->need_write) { 572 /* 573 * XXX: have to unlock closure before we unlock journal lock, 574 * else we race with bch_journal(). But this way we race 575 * against cache set unregister. Doh. 576 */ 577 set_closure_fn(cl, NULL, NULL); 578 closure_sub(cl, CLOSURE_RUNNING + 1); 579 spin_unlock(&c->journal.lock); 580 return; 581 } else if (journal_full(&c->journal)) { 582 journal_reclaim(c); 583 spin_unlock(&c->journal.lock); 584 585 btree_flush_write(c); 586 continue_at(cl, journal_write, system_wq); 587 } 588 589 c->journal.blocks_free -= set_blocks(w->data, c); 590 591 w->data->btree_level = c->root->level; 592 593 bkey_copy(&w->data->btree_root, &c->root->key); 594 bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket); 595 596 for_each_cache(ca, c, i) 597 w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; 598 599 w->data->magic = jset_magic(c); 600 w->data->version = BCACHE_JSET_VERSION; 601 w->data->last_seq = last_seq(&c->journal); 602 w->data->csum = csum_set(w->data); 603 604 for (i = 0; i < KEY_PTRS(k); i++) { 605 ca = PTR_CACHE(c, k, i); 606 bio = &ca->journal.bio; 607 608 atomic_long_add(sectors, &ca->meta_sectors_written); 609 610 bio_reset(bio); 611 bio->bi_sector = PTR_OFFSET(k, i); 612 bio->bi_bdev = ca->bdev; 613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; 614 bio->bi_size = sectors << 9; 615 616 bio->bi_end_io = journal_write_endio; 617 bio->bi_private = w; 618 bch_bio_map(bio, w->data); 619 620 trace_bcache_journal_write(bio); 621 bio_list_add(&list, bio); 622 623 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors); 624 625 ca->journal.seq[ca->journal.cur_idx] = w->data->seq; 626 } 627 628 atomic_dec_bug(&fifo_back(&c->journal.pin)); 629 bch_journal_next(&c->journal); 630 journal_reclaim(c); 631 632 spin_unlock(&c->journal.lock); 633 634 while ((bio = bio_list_pop(&list))) 635 closure_bio_submit(bio, cl, c->cache[0]); 636 637 continue_at(cl, journal_write_done, NULL); 638} 639 640static void journal_write(struct closure *cl) 641{ 642 struct cache_set *c = container_of(cl, struct cache_set, journal.io); 643 644 spin_lock(&c->journal.lock); 645 journal_write_unlocked(cl); 646} 647 648static void journal_try_write(struct cache_set *c) 649 __releases(c->journal.lock) 650{ 651 struct closure *cl = &c->journal.io; 652 struct journal_write *w = c->journal.cur; 653 654 w->need_write = true; 655 656 if (closure_trylock(cl, &c->cl)) 657 journal_write_unlocked(cl); 658 else 659 spin_unlock(&c->journal.lock); 660} 661 662static struct journal_write *journal_wait_for_write(struct cache_set *c, 663 unsigned nkeys) 664{ 665 size_t sectors; 666 struct closure cl; 667 668 closure_init_stack(&cl); 669 670 spin_lock(&c->journal.lock); 671 672 while (1) { 673 struct journal_write *w = c->journal.cur; 674 675 sectors = __set_blocks(w->data, w->data->keys + nkeys, 676 c) * c->sb.block_size; 677 678 if (sectors <= min_t(size_t, 679 c->journal.blocks_free * c->sb.block_size, 680 PAGE_SECTORS << JSET_BITS)) 681 return w; 682 683 /* XXX: tracepoint */ 684 if (!journal_full(&c->journal)) { 685 trace_bcache_journal_entry_full(c); 686 687 /* 688 * XXX: If we were inserting so many keys that they 689 * won't fit in an _empty_ journal write, we'll 690 * deadlock. For now, handle this in 691 * bch_keylist_realloc() - but something to think about. 692 */ 693 BUG_ON(!w->data->keys); 694 695 closure_wait(&w->wait, &cl); 696 journal_try_write(c); /* unlocks */ 697 } else { 698 trace_bcache_journal_full(c); 699 700 closure_wait(&c->journal.wait, &cl); 701 journal_reclaim(c); 702 spin_unlock(&c->journal.lock); 703 704 btree_flush_write(c); 705 } 706 707 closure_sync(&cl); 708 spin_lock(&c->journal.lock); 709 } 710} 711 712static void journal_write_work(struct work_struct *work) 713{ 714 struct cache_set *c = container_of(to_delayed_work(work), 715 struct cache_set, 716 journal.work); 717 spin_lock(&c->journal.lock); 718 journal_try_write(c); 719} 720 721/* 722 * Entry point to the journalling code - bio_insert() and btree_invalidate() 723 * pass bch_journal() a list of keys to be journalled, and then 724 * bch_journal() hands those same keys off to btree_insert_async() 725 */ 726 727atomic_t *bch_journal(struct cache_set *c, 728 struct keylist *keys, 729 struct closure *parent) 730{ 731 struct journal_write *w; 732 atomic_t *ret; 733 734 if (!CACHE_SYNC(&c->sb)) 735 return NULL; 736 737 w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); 738 739 memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys)); 740 w->data->keys += bch_keylist_nkeys(keys); 741 742 ret = &fifo_back(&c->journal.pin); 743 atomic_inc(ret); 744 745 if (parent) { 746 closure_wait(&w->wait, parent); 747 journal_try_write(c); 748 } else if (!w->need_write) { 749 schedule_delayed_work(&c->journal.work, 750 msecs_to_jiffies(c->journal_delay_ms)); 751 spin_unlock(&c->journal.lock); 752 } else { 753 spin_unlock(&c->journal.lock); 754 } 755 756 757 return ret; 758} 759 760void bch_journal_meta(struct cache_set *c, struct closure *cl) 761{ 762 struct keylist keys; 763 atomic_t *ref; 764 765 bch_keylist_init(&keys); 766 767 ref = bch_journal(c, &keys, cl); 768 if (ref) 769 atomic_dec_bug(ref); 770} 771 772void bch_journal_free(struct cache_set *c) 773{ 774 free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); 775 free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); 776 free_fifo(&c->journal.pin); 777} 778 779int bch_journal_alloc(struct cache_set *c) 780{ 781 struct journal *j = &c->journal; 782 783 closure_init_unlocked(&j->io); 784 spin_lock_init(&j->lock); 785 INIT_DELAYED_WORK(&j->work, journal_write_work); 786 787 c->journal_delay_ms = 100; 788 789 j->w[0].c = c; 790 j->w[1].c = c; 791 792 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || 793 !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || 794 !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) 795 return -ENOMEM; 796 797 return 0; 798} 799