journal.c revision c13f3af9247db929fe1be86c0442ef161e615ac4
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector = bucket + offset;
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = READ;
		bio->bi_iter.bi_size = len << 9;

		bio->bi_end_io = journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset += blocks * ca->sb.block_size;
			len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
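
/*
 * Find all the journal entries still present on disk and collect them on
 * @list, oldest first. Per device, the strategy below is: probe buckets
 * in golden ratio hash order until one with valid entries turns up (or
 * fall back to a linear scan of whatever wasn't probed), binary search
 * for where the newest sequence numbers end, then read buckets backwards
 * from there until no further entries are found.
 */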
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		int ret = journal_read_bucket(ca, list, b);	\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;

			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
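
/*
 * Called before replay: reattach journal pins to the entries on @list and
 * bump the per-bucket pin counts for every pointer in the journalled keys,
 * marking them in use (just as garbage collection would) so the allocator
 * can't reuse those buckets before the keys are safely in the btree.
 */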
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++)
				if (ptr_available(c, k, j))
					atomic_inc(&PTR_BUCKET(c, k, j)->pin);

			bch_initial_mark_key(c, 0, k);
		}
	}
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */
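
/*
 * Journal space isn't freed until the keys in an entry have been written
 * into the btree; when the journal fills up, btree_flush_write() picks the
 * btree node holding the oldest journal pin and writes it out, so that
 * journal_reclaim() can advance past that entry.
 */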
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs = 1;
		bio->bi_io_vec = bio->bi_inline_vecs;
		bio->bi_iter.bi_size = bucket_bytes(ca);
		bio->bi_end_io = journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
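
/*
 * Free up journal space that is no longer needed: pop fully released pins
 * off the front of the fifo, advance each device's last_idx past buckets
 * whose entries are all older than last_seq, issue discards for the freed
 * buckets, and if the current write is out of space, grab the next bucket
 * on each device for it.
 */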
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq = ++j->seq;
	j->cur->dirty = false;
	j->cur->need_write = false;
	j->cur->data->keys = 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
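
/*
 * Journal writes are double buffered (journal.w[0]/journal.w[1]): keys
 * accumulate in one journal_write while the other is being written out.
 * journal_write_done() wakes up anyone waiting on the write that just
 * completed, then reruns journal_write() in case the current buffer
 * became due for writing in the meantime.
 */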
static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic = jset_magic(&c->sb);
	w->data->version = BCACHE_JSET_VERSION;
	w->data->last_seq = last_seq(&c->journal);
	w->data->csum = csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
		bio->bi_bdev = ca->bdev;
		bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io = journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
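
/*
 * Wait until the current journal entry has room for another @nkeys keys,
 * returning with journal.lock held. If the entry is full, kick off a write
 * of it; if the whole journal is full, reclaim space (flushing btree nodes
 * if that's what it takes) and retry once woken.
 */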
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bch_data_insert_keys() passes
 * bch_journal() a list of keys to be journalled, and then hands those
 * same keys off to bch_btree_insert()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}