journal.c revision 65ddf45a3102916fb622c71f7af158b19d49dc7f
1/*
2 * bcache journalling code, for btree insertions
3 *
4 * Copyright 2012 Google, Inc.
5 */
6
7#include "bcache.h"
8#include "btree.h"
9#include "debug.h"
10
11#include <trace/events/bcache.h>
12
13/*
14 * Journal replay/recovery:
15 *
16 * This code is all driven from run_cache_set(); we first read the journal
17 * entries, do some other stuff, then we mark all the keys in the journal
18 * entries (same as garbage collection would), then we replay them - reinserting
19 * them into the cache in precisely the same order as they appear in the
20 * journal.
21 *
22 * We only journal keys that go in leaf nodes, which simplifies things quite a
23 * bit.
24 */
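
/*
 * On-disk layout, as read back below: each cache device reserves a set of
 * journal buckets (listed in ca->sb.d[]), and each bucket is packed with
 * consecutive jsets - a header (magic, csum, seq, last_seq) followed by the
 * journalled keys. journal_read_bucket() walks a bucket jset by jset,
 * validating magic and checksum, until it reaches the end of the bucket or
 * a jset that doesn't validate.
 */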
25
26static void journal_read_endio(struct bio *bio, int error)
27{
28	struct closure *cl = bio->bi_private;
29	closure_put(cl);
30}
31
32static int journal_read_bucket(struct cache *ca, struct list_head *list,
33			       unsigned bucket_index)
34{
35	struct journal_device *ja = &ca->journal;
36	struct bio *bio = &ja->bio;
37
38	struct journal_replay *i;
39	struct jset *j, *data = ca->set->journal.w[0].data;
40	struct closure cl;
41	unsigned len, left, offset = 0;
42	int ret = 0;
43	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
44
45	closure_init_stack(&cl);
46
47	pr_debug("reading %u", bucket_index);
48
49	while (offset < ca->sb.bucket_size) {
50reread:		left = ca->sb.bucket_size - offset;
51		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
52
53		bio_reset(bio);
54		bio->bi_iter.bi_sector	= bucket + offset;
55		bio->bi_bdev	= ca->bdev;
56		bio->bi_rw	= READ;
57		bio->bi_iter.bi_size	= len << 9;
58
59		bio->bi_end_io	= journal_read_endio;
60		bio->bi_private = &cl;
61		bch_bio_map(bio, data);
62
63		closure_bio_submit(bio, &cl, ca);
64		closure_sync(&cl);
65
66		/* This function could be simpler now since we no longer write
67		 * journal entries that overlap bucket boundaries; this means
68		 * the start of a bucket will always have a valid journal entry
69		 * if it has any journal entries at all.
70		 */
71
72		j = data;
73		while (len) {
74			struct list_head *where;
75			size_t blocks, bytes = set_bytes(j);
76
77			if (j->magic != jset_magic(&ca->sb)) {
78				pr_debug("%u: bad magic", bucket_index);
79				return ret;
80			}
81
82			if (bytes > left << 9 ||
83			    bytes > PAGE_SIZE << JSET_BITS) {
84				pr_info("%u: too big, %zu bytes, offset %u",
85					bucket_index, bytes, offset);
86				return ret;
87			}
88
89			if (bytes > len << 9)
90				goto reread;
91
92			if (j->csum != csum_set(j)) {
93				pr_info("%u: bad csum, %zu bytes, offset %u",
94					bucket_index, bytes, offset);
95				return ret;
96			}
97
98			blocks = set_blocks(j, block_bytes(ca->set));
99
100			while (!list_empty(list)) {
101				i = list_first_entry(list,
102					struct journal_replay, list);
103				if (i->j.seq >= j->last_seq)
104					break;
105				list_del(&i->list);
106				kfree(i);
107			}
108
109			list_for_each_entry_reverse(i, list, list) {
110				if (j->seq == i->j.seq)
111					goto next_set;
112
113				if (j->seq < i->j.last_seq)
114					goto next_set;
115
116				if (j->seq > i->j.seq) {
117					where = &i->list;
118					goto add;
119				}
120			}
121
122			where = list;
123add:
124			i = kmalloc(offsetof(struct journal_replay, j) +
125				    bytes, GFP_KERNEL);
126			if (!i)
127				return -ENOMEM;
128			memcpy(&i->j, j, bytes);
129			list_add(&i->list, where);
130			ret = 1;
131
132			ja->seq[bucket_index] = j->seq;
133next_set:
134			offset	+= blocks * ca->sb.block_size;
135			len	-= blocks * ca->sb.block_size;
136			j = ((void *) j) + blocks * block_bytes(ca);
137		}
138	}
139
140	return ret;
141}
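
/*
 * A worked example of the arithmetic above (numbers are illustrative only):
 * with block_size = 8 sectors, a jset that set_blocks() says occupies 3
 * blocks advances offset (and shrinks len) by 3 * 8 = 24 sectors, and the
 * next jset header is expected 3 * 8 * 512 = 12288 bytes further into the
 * read buffer. If the next jset is bigger than what is left in the buffer
 * (bytes > len << 9), "goto reread" restarts the read at the current offset
 * so the whole jset lands in one buffer.
 */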
142
143int bch_journal_read(struct cache_set *c, struct list_head *list)
144{
145#define read_bucket(b)							\
146	({								\
147		int ret = journal_read_bucket(ca, list, b);		\
148		__set_bit(b, bitmap);					\
149		if (ret < 0)						\
150			return ret;					\
151		ret;							\
152	})
153
154	struct cache *ca;
155	unsigned iter;
156
157	for_each_cache(ca, c, iter) {
158		struct journal_device *ja = &ca->journal;
159		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
160		unsigned i, l, r, m;
161		uint64_t seq;
162
163		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
164		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
165
166		/*
167		 * Read journal buckets ordered by golden ratio hash to quickly
168		 * find a sequence of buckets with valid journal entries
169		 */
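		/*
		 * 2654435769 below is the 32-bit Fibonacci hashing constant
		 * (roughly 2^32 / golden ratio), so successive values of i
		 * probe the buckets in a scattered order. Because the index
		 * is reduced mod njournal_buckets the sequence can repeat,
		 * which is why visited buckets are tracked in the bitmap and
		 * the loop stops at the first repeat.
		 */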
170		for (i = 0; i < ca->sb.njournal_buckets; i++) {
171			l = (i * 2654435769U) % ca->sb.njournal_buckets;
172
173			if (test_bit(l, bitmap))
174				break;
175
176			if (read_bucket(l))
177				goto bsearch;
178		}
179
180		/*
181		 * If that fails, check all the buckets we haven't checked
182		 * already
183		 */
184		pr_debug("falling back to linear search");
185
186		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
187		     l < ca->sb.njournal_buckets;
188		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
189			if (read_bucket(l))
190				goto bsearch;
191
192		if (list_empty(list))
193			continue;
194bsearch:
195		/* Binary search */
196		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
197		pr_debug("starting binary search, l %u r %u", l, r);
198
199		while (l + 1 < r) {
200			seq = list_entry(list->prev, struct journal_replay,
201					 list)->j.seq;
202
203			m = (l + r) >> 1;
204			read_bucket(m);
205
206			if (seq != list_entry(list->prev, struct journal_replay,
207					      list)->j.seq)
208				l = m;
209			else
210				r = m;
211		}
212
213		/*
214		 * Read buckets in reverse order until we stop finding more
215		 * journal entries
216		 */
217		pr_debug("finishing up: m %u njournal_buckets %u",
218			 m, ca->sb.njournal_buckets);
219		l = m;
220
221		while (1) {
222			if (!l--)
223				l = ca->sb.njournal_buckets - 1;
224
225			if (l == m)
226				break;
227
228			if (test_bit(l, bitmap))
229				continue;
230
231			if (!read_bucket(l))
232				break;
233		}
234
235		seq = 0;
236
237		for (i = 0; i < ca->sb.njournal_buckets; i++)
238			if (ja->seq[i] > seq) {
239				seq = ja->seq[i];
240				ja->cur_idx = ja->discard_idx =
241					ja->last_idx = i;
242
243			}
244	}
245
246	if (!list_empty(list))
247		c->journal.seq = list_entry(list->prev,
248					    struct journal_replay,
249					    list)->j.seq;
250
251	return 0;
252#undef read_bucket
253}
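
/*
 * After bch_journal_read() returns 0, *list holds the surviving journal
 * entries as struct journal_replay, ordered by sequence number. For each
 * cache, ja->seq[] records the newest seq found in each bucket and
 * cur_idx/last_idx/discard_idx all point at the bucket holding the newest
 * entry; c->journal.seq is the newest sequence number overall.
 */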
254
255void bch_journal_mark(struct cache_set *c, struct list_head *list)
256{
257	atomic_t p = { 0 };
258	struct bkey *k;
259	struct journal_replay *i;
260	struct journal *j = &c->journal;
261	uint64_t last = j->seq;
262
263	/*
264	 * journal.pin should never fill up - we never write a journal
265	 * entry when it would fill up. But if for some reason it does, we
266	 * iterate over the list in reverse order so that we can just skip that
267	 * refcount instead of bugging.
268	 */
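	/*
	 * j->pin holds one atomic_t per journal entry that may still need to
	 * be flushed: roughly, the back of the fifo corresponds to j->seq and
	 * each step toward the front is one sequence number older. Here we
	 * rebuild that fifo from the entries we just read, taking a single
	 * reference on each one.
	 */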
269
270	list_for_each_entry_reverse(i, list, list) {
271		BUG_ON(last < i->j.seq);
272		i->pin = NULL;
273
274		while (last-- != i->j.seq)
275			if (fifo_free(&j->pin) > 1) {
276				fifo_push_front(&j->pin, p);
277				atomic_set(&fifo_front(&j->pin), 0);
278			}
279
280		if (fifo_free(&j->pin) > 1) {
281			fifo_push_front(&j->pin, p);
282			i->pin = &fifo_front(&j->pin);
283			atomic_set(i->pin, 1);
284		}
285
286		for (k = i->j.start;
287		     k < bset_bkey_last(&i->j);
288		     k = bkey_next(k)) {
289			unsigned j;
290			struct bucket *g;
291
292			for (j = 0; j < KEY_PTRS(k); j++) {
293				if (!ptr_available(c, k, j))
294					continue;
295
296				g = PTR_BUCKET(c, k, j);
297				atomic_inc(&g->pin);
298
299				if (g->prio == BTREE_PRIO &&
300				    !ptr_stale(c, k, j))
301					g->prio = INITIAL_PRIO;
302			}
303
304			__bch_btree_mark_key(c, 0, k);
305		}
306	}
307}
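
/*
 * Marking mirrors what garbage collection does for keys already in the
 * btree: every bucket a journalled key points into gets its pin count
 * bumped, so the allocator won't reuse it before the key has actually been
 * replayed into the btree.
 */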
308
309int bch_journal_replay(struct cache_set *s, struct list_head *list)
310{
311	int ret = 0, keys = 0, entries = 0;
312	struct bkey *k;
313	struct journal_replay *i =
314		list_entry(list->prev, struct journal_replay, list);
315
316	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
317	struct keylist keylist;
318
319	bch_keylist_init(&keylist);
320
321	list_for_each_entry(i, list, list) {
322		BUG_ON(i->pin && atomic_read(i->pin) != 1);
323
324		cache_set_err_on(n != i->j.seq, s,
325"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
326				 n, i->j.seq - 1, start, end);
327
328		for (k = i->j.start;
329		     k < bset_bkey_last(&i->j);
330		     k = bkey_next(k)) {
331			trace_bcache_journal_replay_key(k);
332
333			bkey_copy(keylist.top, k);
334			bch_keylist_push(&keylist);
335
336			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
337			if (ret)
338				goto err;
339
340			BUG_ON(!bch_keylist_empty(&keylist));
341			keys++;
342
343			cond_resched();
344		}
345
346		if (i->pin)
347			atomic_dec(i->pin);
348		n = i->j.seq + 1;
349		entries++;
350	}
351
352	pr_info("journal replay done, %i keys in %i entries, seq %llu",
353		keys, entries, end);
354err:
355	while (!list_empty(list)) {
356		i = list_first_entry(list, struct journal_replay, list);
357		list_del(&i->list);
358		kfree(i);
359	}
360
361	return ret;
362}
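
/*
 * Note the pin handling during replay: each entry's pin (taken in
 * bch_journal_mark()) is passed to bch_btree_insert() so the btree write
 * path can keep the journal entry pinned until the new keys are safely on
 * disk, and the reference taken in bch_journal_mark() is dropped once all
 * of that entry's keys have been inserted.
 */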
363
364/* Journalling */
365
366static void btree_flush_write(struct cache_set *c)
367{
368	/*
369	 * Try to find the btree node that references the oldest journal
370	 * entry; best is our current candidate, and is locked below if non-NULL.
371	 */
372	struct btree *b, *best;
373	unsigned i;
374retry:
375	best = NULL;
376
377	for_each_cached_btree(b, c, i)
378		if (btree_current_write(b)->journal) {
379			if (!best)
380				best = b;
381			else if (journal_pin_cmp(c,
382					btree_current_write(best)->journal,
383					btree_current_write(b)->journal)) {
384				best = b;
385			}
386		}
387
388	b = best;
389	if (b) {
390		rw_lock(true, b, b->level);
391
392		if (!btree_current_write(b)->journal) {
393			rw_unlock(true, b);
394			/* We raced */
395			goto retry;
396		}
397
398		bch_btree_node_write(b, NULL);
399		rw_unlock(true, b);
400	}
401}
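
/*
 * btree_flush_write() is how journal space eventually gets freed under
 * pressure: writing out the btree node that holds the oldest unwritten
 * journalled keys lets the corresponding pin drop, which in turn lets
 * journal_reclaim() advance last_idx past that entry's bucket.
 */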
402
403#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
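
/*
 * For illustration: if j->seq is 100 and the pin fifo holds three entries
 * (for sequence numbers 98, 99 and 100), last_seq(j) = 100 - 3 + 1 = 98,
 * i.e. the oldest journal entry that still has outstanding references.
 */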
404
405static void journal_discard_endio(struct bio *bio, int error)
406{
407	struct journal_device *ja =
408		container_of(bio, struct journal_device, discard_bio);
409	struct cache *ca = container_of(ja, struct cache, journal);
410
411	atomic_set(&ja->discard_in_flight, DISCARD_DONE);
412
413	closure_wake_up(&ca->set->journal.wait);
414	closure_put(&ca->set->cl);
415}
416
417static void journal_discard_work(struct work_struct *work)
418{
419	struct journal_device *ja =
420		container_of(work, struct journal_device, discard_work);
421
422	submit_bio(0, &ja->discard_bio);
423}
424
425static void do_journal_discard(struct cache *ca)
426{
427	struct journal_device *ja = &ca->journal;
428	struct bio *bio = &ja->discard_bio;
429
430	if (!ca->discard) {
431		ja->discard_idx = ja->last_idx;
432		return;
433	}
434
435	switch (atomic_read(&ja->discard_in_flight)) {
436	case DISCARD_IN_FLIGHT:
437		return;
438
439	case DISCARD_DONE:
440		ja->discard_idx = (ja->discard_idx + 1) %
441			ca->sb.njournal_buckets;
442
443		atomic_set(&ja->discard_in_flight, DISCARD_READY);
444		/* fallthrough */
445
446	case DISCARD_READY:
447		if (ja->discard_idx == ja->last_idx)
448			return;
449
450		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
451
452		bio_init(bio);
453		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
454						ca->sb.d[ja->discard_idx]);
455		bio->bi_bdev		= ca->bdev;
456		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
457		bio->bi_max_vecs	= 1;
458		bio->bi_io_vec		= bio->bi_inline_vecs;
459		bio->bi_iter.bi_size	= bucket_bytes(ca);
460		bio->bi_end_io		= journal_discard_endio;
461
462		closure_get(&ca->set->cl);
463		INIT_WORK(&ja->discard_work, journal_discard_work);
464		schedule_work(&ja->discard_work);
465	}
466}
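
/*
 * The discard state machine above: discard_idx trails last_idx, and each
 * pass discards at most one reclaimed bucket - DISCARD_READY issues the
 * discard and moves to DISCARD_IN_FLIGHT, the endio handler flips it to
 * DISCARD_DONE, and the next call advances discard_idx and goes back to
 * DISCARD_READY. With discards disabled, discard_idx simply follows
 * last_idx directly.
 */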
467
468static void journal_reclaim(struct cache_set *c)
469{
470	struct bkey *k = &c->journal.key;
471	struct cache *ca;
472	uint64_t last_seq;
473	unsigned iter, n = 0;
474	atomic_t p;
475
476	while (!atomic_read(&fifo_front(&c->journal.pin)))
477		fifo_pop(&c->journal.pin, p);
478
479	last_seq = last_seq(&c->journal);
480
481	/* Update last_idx */
482
483	for_each_cache(ca, c, iter) {
484		struct journal_device *ja = &ca->journal;
485
486		while (ja->last_idx != ja->cur_idx &&
487		       ja->seq[ja->last_idx] < last_seq)
488			ja->last_idx = (ja->last_idx + 1) %
489				ca->sb.njournal_buckets;
490	}
491
492	for_each_cache(ca, c, iter)
493		do_journal_discard(ca);
494
495	if (c->journal.blocks_free)
496		goto out;
497
498	/*
499	 * Allocate:
500	 * XXX: Sort by free journal space
501	 */
502
503	for_each_cache(ca, c, iter) {
504		struct journal_device *ja = &ca->journal;
505		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
506
507		/* No space available on this device */
508		if (next == ja->discard_idx)
509			continue;
510
511		ja->cur_idx = next;
512		k->ptr[n++] = PTR(0,
513				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
514				  ca->sb.nr_this_dev);
515	}
516
517	bkey_init(k);
518	SET_KEY_PTRS(k, n);
519
520	if (n)
521		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
522out:
523	if (!journal_full(&c->journal))
524		__closure_wake_up(&c->journal.wait);
525}
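
/*
 * So after journal_reclaim(): cur_idx is the bucket currently being written,
 * last_idx is (roughly) the oldest bucket that still holds live entries
 * (seq >= last_seq), and discard_idx chases last_idx. New space is only
 * handed out when the bucket after cur_idx has already been reclaimed (and
 * discarded, when discards are enabled), i.e. next != discard_idx above.
 */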
526
527void bch_journal_next(struct journal *j)
528{
529	atomic_t p = { 1 };
530
531	j->cur = (j->cur == j->w)
532		? &j->w[1]
533		: &j->w[0];
534
535	/*
536	 * The fifo_push() needs to happen at the same time as j->seq is
537	 * incremented for last_seq() to be calculated correctly
538	 */
539	BUG_ON(!fifo_push(&j->pin, p));
540	atomic_set(&fifo_back(&j->pin), 1);
541
542	j->cur->data->seq	= ++j->seq;
543	j->cur->dirty		= false;
544	j->cur->need_write	= false;
545	j->cur->data->keys	= 0;
546
547	if (fifo_full(&j->pin))
548		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
549}
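
/*
 * bch_journal_next() flips between the two in-memory journal_write buffers
 * (j->w[0] / j->w[1]): while one is being written out, keys for the next
 * entry accumulate in the other. The fifo_push() adds the pin that
 * bch_journal() callers take a reference on for the new sequence number.
 */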
550
551static void journal_write_endio(struct bio *bio, int error)
552{
553	struct journal_write *w = bio->bi_private;
554
555	cache_set_err_on(error, w->c, "journal io error");
556	closure_put(&w->c->journal.io);
557}
558
559static void journal_write(struct closure *);
560
561static void journal_write_done(struct closure *cl)
562{
563	struct journal *j = container_of(cl, struct journal, io);
564	struct journal_write *w = (j->cur == j->w)
565		? &j->w[1]
566		: &j->w[0];
567
568	__closure_wake_up(&w->wait);
569	continue_at_nobarrier(cl, journal_write, system_wq);
570}
571
572static void journal_write_unlock(struct closure *cl)
573{
574	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
575
576	c->journal.io_in_flight = 0;
577	spin_unlock(&c->journal.lock);
578}
579
580static void journal_write_unlocked(struct closure *cl)
581	__releases(c->journal.lock)
582{
583	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
584	struct cache *ca;
585	struct journal_write *w = c->journal.cur;
586	struct bkey *k = &c->journal.key;
587	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
588		c->sb.block_size;
589
590	struct bio *bio;
591	struct bio_list list;
592	bio_list_init(&list);
593
594	if (!w->need_write) {
595		closure_return_with_destructor(cl, journal_write_unlock);
596	} else if (journal_full(&c->journal)) {
597		journal_reclaim(c);
598		spin_unlock(&c->journal.lock);
599
600		btree_flush_write(c);
601		continue_at(cl, journal_write, system_wq);
602	}
603
604	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
605
606	w->data->btree_level = c->root->level;
607
608	bkey_copy(&w->data->btree_root, &c->root->key);
609	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
610
611	for_each_cache(ca, c, i)
612		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
613
614	w->data->magic		= jset_magic(&c->sb);
615	w->data->version	= BCACHE_JSET_VERSION;
616	w->data->last_seq	= last_seq(&c->journal);
617	w->data->csum		= csum_set(w->data);
618
619	for (i = 0; i < KEY_PTRS(k); i++) {
620		ca = PTR_CACHE(c, k, i);
621		bio = &ca->journal.bio;
622
623		atomic_long_add(sectors, &ca->meta_sectors_written);
624
625		bio_reset(bio);
626		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
627		bio->bi_bdev	= ca->bdev;
628		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
629		bio->bi_iter.bi_size = sectors << 9;
630
631		bio->bi_end_io	= journal_write_endio;
632		bio->bi_private = w;
633		bch_bio_map(bio, w->data);
634
635		trace_bcache_journal_write(bio);
636		bio_list_add(&list, bio);
637
638		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
639
640		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
641	}
642
643	atomic_dec_bug(&fifo_back(&c->journal.pin));
644	bch_journal_next(&c->journal);
645	journal_reclaim(c);
646
647	spin_unlock(&c->journal.lock);
648
649	while ((bio = bio_list_pop(&list)))
650		closure_bio_submit(bio, cl, c->cache[0]);
651
652	continue_at(cl, journal_write_done, NULL);
653}
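
/*
 * Each journal entry is thus written to every bucket that journal_reclaim()
 * put in c->journal.key - normally one per cache device - with REQ_FLUSH
 * and REQ_FUA, so it is durable before the waiting closures are woken in
 * journal_write_done().
 */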
654
655static void journal_write(struct closure *cl)
656{
657	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
658
659	spin_lock(&c->journal.lock);
660	journal_write_unlocked(cl);
661}
662
663static void journal_try_write(struct cache_set *c)
664	__releases(c->journal.lock)
665{
666	struct closure *cl = &c->journal.io;
667	struct journal_write *w = c->journal.cur;
668
669	w->need_write = true;
670
671	if (!c->journal.io_in_flight) {
672		c->journal.io_in_flight = 1;
673		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
674	} else {
675		spin_unlock(&c->journal.lock);
676	}
677}
678
679static struct journal_write *journal_wait_for_write(struct cache_set *c,
680						    unsigned nkeys)
681{
682	size_t sectors;
683	struct closure cl;
684	bool wait = false;
685
686	closure_init_stack(&cl);
687
688	spin_lock(&c->journal.lock);
689
690	while (1) {
691		struct journal_write *w = c->journal.cur;
692
693		sectors = __set_blocks(w->data, w->data->keys + nkeys,
694				       block_bytes(c)) * c->sb.block_size;
695
696		if (sectors <= min_t(size_t,
697				     c->journal.blocks_free * c->sb.block_size,
698				     PAGE_SECTORS << JSET_BITS))
699			return w;
700
701		if (wait)
702			closure_wait(&c->journal.wait, &cl);
703
704		if (!journal_full(&c->journal)) {
705			if (wait)
706				trace_bcache_journal_entry_full(c);
707
708			/*
709			 * XXX: If we were inserting so many keys that they
710			 * won't fit in an _empty_ journal write, we'll
711			 * deadlock. For now, handle this in
712			 * bch_keylist_realloc() - but something to think about.
713			 */
714			BUG_ON(!w->data->keys);
715
716			journal_try_write(c); /* unlocks */
717		} else {
718			if (wait)
719				trace_bcache_journal_full(c);
720
721			journal_reclaim(c);
722			spin_unlock(&c->journal.lock);
723
724			btree_flush_write(c);
725		}
726
727		closure_sync(&cl);
728		spin_lock(&c->journal.lock);
729		wait = true;
730	}
731}
732
733static void journal_write_work(struct work_struct *work)
734{
735	struct cache_set *c = container_of(to_delayed_work(work),
736					   struct cache_set,
737					   journal.work);
738	spin_lock(&c->journal.lock);
739	if (c->journal.cur->dirty)
740		journal_try_write(c);
741	else
742		spin_unlock(&c->journal.lock);
743}
744
745/*
746 * Entry point to the journalling code - the data insert and btree
747 * invalidate paths pass bch_journal() a list of keys to be journalled;
748 * the caller then inserts those same keys into the btree
749 */
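/*
 * For illustration, a caller is expected to do roughly the following
 * (locking and error handling omitted; parent_closure is just a placeholder
 * name):
 *
 *	struct keylist keys;
 *	atomic_t *ref;
 *
 *	bch_keylist_init(&keys);
 *	... append the keys being inserted ...
 *	ref = bch_journal(c, &keys, parent_closure);
 *	... insert the same keys into the btree ...
 *	if (ref)
 *		atomic_dec_bug(ref);
 *
 * where the final atomic_dec_bug() drops the journal pin once the keys are
 * in the btree; bch_journal_meta() below is a minimal real example of this
 * pattern.
 */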
750
751atomic_t *bch_journal(struct cache_set *c,
752		      struct keylist *keys,
753		      struct closure *parent)
754{
755	struct journal_write *w;
756	atomic_t *ret;
757
758	if (!CACHE_SYNC(&c->sb))
759		return NULL;
760
761	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
762
763	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
764	w->data->keys += bch_keylist_nkeys(keys);
765
766	ret = &fifo_back(&c->journal.pin);
767	atomic_inc(ret);
768
769	if (parent) {
770		closure_wait(&w->wait, parent);
771		journal_try_write(c);
772	} else if (!w->dirty) {
773		w->dirty = true;
774		schedule_delayed_work(&c->journal.work,
775				      msecs_to_jiffies(c->journal_delay_ms));
776		spin_unlock(&c->journal.lock);
777	} else {
778		spin_unlock(&c->journal.lock);
779	}
780
781
782	return ret;
783}
784
785void bch_journal_meta(struct cache_set *c, struct closure *cl)
786{
787	struct keylist keys;
788	atomic_t *ref;
789
790	bch_keylist_init(&keys);
791
792	ref = bch_journal(c, &keys, cl);
793	if (ref)
794		atomic_dec_bug(ref);
795}
796
797void bch_journal_free(struct cache_set *c)
798{
799	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
800	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
801	free_fifo(&c->journal.pin);
802}
803
804int bch_journal_alloc(struct cache_set *c)
805{
806	struct journal *j = &c->journal;
807
808	spin_lock_init(&j->lock);
809	INIT_DELAYED_WORK(&j->work, journal_write_work);
810
811	c->journal_delay_ms = 100;
812
813	j->w[0].c = c;
814	j->w[1].c = c;
815
816	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
817	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
818	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
819		return -ENOMEM;
820
821	return 0;
822}
823