journal.c revision 3d4492f81dd7b486f1be0616a1ce7f73760f406e
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit.... The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too old, it will block
**                  until the current transaction is finished, and then
**                  start a new one.  Usually, your transaction will get
**                  joined in with previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing.
**                Otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
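
/*
** A rough sketch of the call sequence described above, as it is used by
** the rest of reiserfs (error handling elided, and the block reservation
** of 10 is purely illustrative):
**
**	struct reiserfs_transaction_handle th;
**
**	journal_begin(&th, sb, 10);
**	reiserfs_prepare_for_journal(sb, bh, 1);
**	... modify bh ...
**	journal_mark_dirty(&th, sb, bh);
**	journal_end(&th, sb, 10);
*/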

#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
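
/* Why 1018: a transaction's list of real block numbers is split between the
** desc and commit blocks, each holding up to JOURNAL_TRANS_HALF entries.
** For the desc block the arithmetic appears to be three 32-bit header
** fields plus a trailing 12-byte magic around the array:
** 12 + 1018 * 4 + 12 == 4096.  (A sketch only; reiserfs_fs.h has the
** authoritative struct layouts.)
*/
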
#define BUFNR 64		/* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb,
			      unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *p_s_sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, p_s_sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(p_s_sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}

static inline void free_bitmap_node(struct super_block *p_s_sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(p_s_sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	// this is ok, we'll try again when more are needed
		}
	}
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
				  struct reiserfs_list_bitmap *jb)
{
	int bmap_nr = block / (p_s_sb->s_blocksize << 3);
	int bit_nr = block % (p_s_sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
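
/* A worked example of the mapping above (illustrative numbers): with a 4k
** blocksize, each bitmap node covers s_blocksize << 3 == 32768 blocks, so
** block 40000 lands in bitmap node 40000 / 32768 == 1 at bit
** 40000 % 32768 == 7232.
*/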

static void cleanup_bitmap_list(struct super_block *p_s_sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(p_s_sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
				   struct reiserfs_list_bitmap *jb_array,
				   int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(p_s_sb,
					 "clm-2000, unable to allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(p_s_sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(p_s_sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure it was flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* the last one is fixed up after the loop */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
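
/* The chunk above ends up as a contiguous doubly linked chain:
** NULL <- head[0] <-> head[1] <-> ... <-> head[num_cnodes - 1] -> NULL.
** The journal keeps the chain as its cnode free list (j_cnode_free_list),
** which get_cnode() and free_cnode() below pop from and push onto.
*/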

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "%s called without kernel lock held",
			       caller);
	}
#else
	;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return NULL;
}
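
/* journal_hash() (defined in reiserfs_fs.h) indexes the table by a hash of
** (sb, block); it expands to an lvalue, which is why insert_journal_hash()
** below can assign through it.
*/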

/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits, this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction?  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %lu, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{

	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
				    unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}
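
/* A note on journal_list_still_alive() above: journal lists sit on
** j_journal_list oldest first (new lists are added at the tail elsewhere in
** this file), so if the oldest remaining list is still no newer than
** trans_id, the list for trans_id cannot have been flushed and freed yet.
*/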

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL,
				 "clm-2084: pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}
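
/* check_barrier_completion() above is the fallback path: if the device
** rejected the barrier write (buffer_eopnotsupp), barriers are disabled for
** the mount and the same buffer is rewritten as a plain synchronous write.
*/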

#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	if (chunk->nr >= CHUNK_SIZE)
		BUG();
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
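
/* Usage note for add_to_chunk(): callers hand in the flush callback that
** matches their buffers (write_chunk or write_ordered_chunk), and pass
** lock == NULL when there is no spinlock to drop around the submission --
** compare write_ordered_buffers() and write_one_transaction() below.
*/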

static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		if (bh->b_private)
			BUG();
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}

int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (test_set_buffer_locked(bh)) {
			if (!buffer_dirty(bh)) {
				list_del_init(&jh->list);
				list_add(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_del_init(&jh->list);
			list_add(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer. We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
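
/* write_ordered_buffers() is driven from flush_commit_list() below, which
** hands it journal->j_dirty_buffers_lock and the transaction's j_bh_list of
** ordered data buffers.
*/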

static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned long trans_id = jl->j_trans_id;
	unsigned long other_trans_id;
	unsigned long first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}

int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		blk_congestion_wait(WRITE, HZ / 10);
	return 0;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	int bn;
	struct buffer_head *tbh = NULL;
	unsigned long trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;
	int write_len;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	get_fs_excl();

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	down(&jl->j_commit_lock);
	if (!journal_list_still_alive(s, trans_id)) {
		up(&jl->j_commit_lock);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		up(&jl->j_commit_lock);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;
		unlock_kernel();
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks. later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh))
				ll_rw_block(WRITE, 1, &tbh);
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);

	/* We're skipping the commit if there's an error.  This check must
	 * come after reading the barrier mount option, or it would be
	 * clobbered by the assignment above
	 */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		/* since we're using ll_rw_block above, it might have skipped
		 ** over a locked buffer.  Double check here
		 */
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601, buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh);
			sync_dirty_buffer(jl->j_commit_bh);
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615: buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	up(&jl->j_commit_lock);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __FUNCTION__);
	put_fs_excl();
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}
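
/* This works because insert_journal_hash() pushes new cnodes onto the head
** of each hash chain, so following hprev from a cnode always moves toward
** more recently logged copies of the same block.
*/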

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(p_s_sb,
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(p_s_sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
					unsigned long offset,
					unsigned long trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(p_s_sb,
						 "journal-699: buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(p_s_sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(p_s_sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(p_s_sb, journal->j_header_bh);
		} else {
		      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(p_s_sb,
					 "journal-837: IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
				       unsigned long offset,
				       unsigned long trans_id)
{
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	unsigned long trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(p_s_sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s,
				 "clm-2048: flush_journal_list called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		down(&journal->j_flush_sem);
	} else if (!down_trylock(&journal->j_flush_sem)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s,
			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s,
			       "journal-844: panic journal list is flushing, wcount is not 0\n");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit. Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s,
					 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s,
					 "clm-2082: Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __FUNCTION__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s,
						 "journal-945: saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1011: cn->bh is NULL\n");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1012: cn->bh is NULL\n");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s,
							 "journal-949: buffer write failed\n");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* undo the inc from journal_mark_dirty */
				put_bh(cn->bh);
				brelse(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __FUNCTION__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __FUNCTION__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		up(&journal->j_flush_sem);
	put_fs_excl();
	return err;
}

static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned long *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned long orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	down(&journal->j_flush_sem);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_sem held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	up(&journal->j_flush_sem);
	return ret;
}

/* o_sync and fsync heavy applications tend to use up
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned long trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}

/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

static void free_journal_ram(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
	free_bitmap_nodes(p_s_sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(p_s_sb, journal);
	vfree(journal);
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	/* we only want to flush out transactions if we were called with error == 0
	 */
	if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, p_s_sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&myth, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));
			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&myth, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));
			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
	flush_workqueue(commit_wq);
	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(p_s_sb);

	return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *p_s_sb)
{
	return do_journal_release(th, p_s_sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb)
{
	return do_journal_release(th, p_s_sb, 1);
}

/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
1954	return 0;
1955}
1956
1957/* returns 0 if it did not find a description block
1958** returns -1 if it found a corrupt commit block
1959** returns 1 if both desc and commit were valid
1960*/
1961static int journal_transaction_is_valid(struct super_block *p_s_sb,
1962					struct buffer_head *d_bh,
1963					unsigned long *oldest_invalid_trans_id,
1964					unsigned long *newest_mount_id)
1965{
1966	struct reiserfs_journal_desc *desc;
1967	struct reiserfs_journal_commit *commit;
1968	struct buffer_head *c_bh;
1969	unsigned long offset;
1970
1971	if (!d_bh)
1972		return 0;
1973
1974	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
1975	if (get_desc_trans_len(desc) > 0
1976	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
1977		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
1978		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
1979			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1980				       "journal-986: transaction "
1981				       "is valid returning because trans_id %d is greater than "
1982				       "oldest_invalid %lu",
1983				       get_desc_trans_id(desc),
1984				       *oldest_invalid_trans_id);
1985			return 0;
1986		}
1987		if (newest_mount_id
1988		    && *newest_mount_id > get_desc_mount_id(desc)) {
1989			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1990				       "journal-1087: transaction "
1991				       "is valid returning because mount_id %d is less than "
1992				       "newest_mount_id %lu",
1993				       get_desc_mount_id(desc),
1994				       *newest_mount_id);
1995			return -1;
1996		}
1997		if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
1998			reiserfs_warning(p_s_sb,
1999					 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
2000					 get_desc_trans_len(desc));
2001			return -1;
2002		}
2003		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2004
2005		/* ok, we have a journal description block, let's see if the transaction was valid */
2006		c_bh =
2007		    journal_bread(p_s_sb,
2008				  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2009				  ((offset + get_desc_trans_len(desc) +
2010				    1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2011		if (!c_bh)
2012			return 0;
2013		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2014		if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2015			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2016				       "journal_transaction_is_valid, commit offset %ld had bad "
2017				       "time %d or length %d",
2018				       c_bh->b_blocknr -
2019				       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2020				       get_commit_trans_id(commit),
2021				       get_commit_trans_len(commit));
2022			brelse(c_bh);
2023			if (oldest_invalid_trans_id) {
2024				*oldest_invalid_trans_id =
2025				    get_desc_trans_id(desc);
2026				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2027					       "journal-1004: "
2028					       "transaction_is_valid setting oldest invalid trans_id "
2029					       "to %d",
2030					       get_desc_trans_id(desc));
2031			}
2032			return -1;
2033		}
2034		brelse(c_bh);
2035		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2036			       "journal-1006: found valid "
2037			       "transaction start offset %llu, len %d id %d",
2038			       d_bh->b_blocknr -
2039			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2040			       get_desc_trans_len(desc),
2041			       get_desc_trans_id(desc));
2042		return 1;
2043	} else {
2044		return 0;
2045	}
2046}
2047
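/* release every buffer head in an array; used while backing out of replay */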
2048static void brelse_array(struct buffer_head **heads, int num)
2049{
2050	int i;
2051	for (i = 0; i < num; i++) {
2052		brelse(heads[i]);
2053	}
2054}
2055
2056/*
2057** given the start, and values for the oldest acceptable transactions,
2058** this either reads in and replays a transaction, or returns because the
2059** transaction is invalid or too old.
2060*/
2061static int journal_read_transaction(struct super_block *p_s_sb,
2062				    unsigned long cur_dblock,
2063				    unsigned long oldest_start,
2064				    unsigned long oldest_trans_id,
2065				    unsigned long newest_mount_id)
2066{
2067	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2068	struct reiserfs_journal_desc *desc;
2069	struct reiserfs_journal_commit *commit;
2070	unsigned long trans_id = 0;
2071	struct buffer_head *c_bh;
2072	struct buffer_head *d_bh;
2073	struct buffer_head **log_blocks = NULL;
2074	struct buffer_head **real_blocks = NULL;
2075	unsigned long trans_offset;
2076	int i;
2077	int trans_half;
2078
2079	d_bh = journal_bread(p_s_sb, cur_dblock);
2080	if (!d_bh)
2081		return 1;
2082	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2083	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2084	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2085		       "journal_read_transaction, offset %llu, len %d mount_id %d",
2086		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2087		       get_desc_trans_len(desc), get_desc_mount_id(desc));
2088	if (get_desc_trans_id(desc) < oldest_trans_id) {
2089		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2090			       "journal_read_trans skipping because %lu is too old",
2091			       cur_dblock -
2092			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2093		brelse(d_bh);
2094		return 1;
2095	}
2096	if (get_desc_mount_id(desc) != newest_mount_id) {
2097		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2098			       "journal_read_trans skipping because %d is != "
2099			       "newest_mount_id %lu", get_desc_mount_id(desc),
2100			       newest_mount_id);
2101		brelse(d_bh);
2102		return 1;
2103	}
2104	c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2105			     ((trans_offset + get_desc_trans_len(desc) + 1) %
2106			      SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2107	if (!c_bh) {
2108		brelse(d_bh);
2109		return 1;
2110	}
2111	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2112	if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2113		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2114			       "journal_read_transaction, "
2115			       "commit offset %llu had bad time %d or length %d",
2116			       c_bh->b_blocknr -
2117			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2118			       get_commit_trans_id(commit),
2119			       get_commit_trans_len(commit));
2120		brelse(c_bh);
2121		brelse(d_bh);
2122		return 1;
2123	}
2124	trans_id = get_desc_trans_id(desc);
2125	/* now we know we've got a good transaction, and it was inside the valid time ranges */
2126	log_blocks = kmalloc(get_desc_trans_len(desc) *
2127			     sizeof(struct buffer_head *), GFP_NOFS);
2128	real_blocks = kmalloc(get_desc_trans_len(desc) *
2129			      sizeof(struct buffer_head *), GFP_NOFS);
2130	if (!log_blocks || !real_blocks) {
2131		brelse(c_bh);
2132		brelse(d_bh);
2133		kfree(log_blocks);
2134		kfree(real_blocks);
2135		reiserfs_warning(p_s_sb,
2136				 "journal-1169: kmalloc failed, unable to mount FS");
2137		return -1;
2138	}
2139	/* get all the buffer heads */
2140	trans_half = journal_trans_half(p_s_sb->s_blocksize);
2141	for (i = 0; i < get_desc_trans_len(desc); i++) {
2142		log_blocks[i] =
2143		    journal_getblk(p_s_sb,
2144				   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2145				   (trans_offset + 1 +
2146				    i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2147		if (i < trans_half) {
2148			real_blocks[i] =
2149			    sb_getblk(p_s_sb,
2150				      le32_to_cpu(desc->j_realblock[i]));
2151		} else {
2152			real_blocks[i] =
2153			    sb_getblk(p_s_sb,
2154				      le32_to_cpu(commit->
2155						  j_realblock[i - trans_half]));
2156		}
2157		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2158			reiserfs_warning(p_s_sb,
2159					 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2160			goto abort_replay;
2161		}
2162		/* make sure we don't try to replay onto log or reserved area */
2163		if (is_block_in_log_or_reserved_area
2164		    (p_s_sb, real_blocks[i]->b_blocknr)) {
2165			reiserfs_warning(p_s_sb,
2166					 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2167		      abort_replay:
2168			brelse_array(log_blocks, i);
2169			brelse_array(real_blocks, i);
2170			brelse(c_bh);
2171			brelse(d_bh);
2172			kfree(log_blocks);
2173			kfree(real_blocks);
2174			return -1;
2175		}
2176	}
2177	/* read in the log blocks, memcpy to the corresponding real block */
2178	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2179	for (i = 0; i < get_desc_trans_len(desc); i++) {
2180		wait_on_buffer(log_blocks[i]);
2181		if (!buffer_uptodate(log_blocks[i])) {
2182			reiserfs_warning(p_s_sb,
2183					 "journal-1212: REPLAY FAILURE fsck required! buffer read failed");
2184			brelse_array(log_blocks + i,
2185				     get_desc_trans_len(desc) - i);
2186			brelse_array(real_blocks, get_desc_trans_len(desc));
2187			brelse(c_bh);
2188			brelse(d_bh);
2189			kfree(log_blocks);
2190			kfree(real_blocks);
2191			return -1;
2192		}
2193		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2194		       real_blocks[i]->b_size);
2195		set_buffer_uptodate(real_blocks[i]);
2196		brelse(log_blocks[i]);
2197	}
2198	/* flush out the real blocks */
2199	for (i = 0; i < get_desc_trans_len(desc); i++) {
2200		set_buffer_dirty(real_blocks[i]);
2201		ll_rw_block(SWRITE, 1, real_blocks + i);
2202	}
2203	for (i = 0; i < get_desc_trans_len(desc); i++) {
2204		wait_on_buffer(real_blocks[i]);
2205		if (!buffer_uptodate(real_blocks[i])) {
2206			reiserfs_warning(p_s_sb,
2207					 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2208			brelse_array(real_blocks + i,
2209				     get_desc_trans_len(desc) - i);
2210			brelse(c_bh);
2211			brelse(d_bh);
2212			kfree(log_blocks);
2213			kfree(real_blocks);
2214			return -1;
2215		}
2216		brelse(real_blocks[i]);
2217	}
2218	cur_dblock =
2219	    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2220	    ((trans_offset + get_desc_trans_len(desc) +
2221	      2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2222	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2223		       "journal-1095: setting journal " "start to offset %ld",
2224		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2225
2226	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2227	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2228	journal->j_last_flush_trans_id = trans_id;
2229	journal->j_trans_id = trans_id + 1;
2230	brelse(c_bh);
2231	brelse(d_bh);
2232	kfree(log_blocks);
2233	kfree(real_blocks);
2234	return 0;
2235}
2236
2237/* This function reads blocks of bufsize size, starting at block and going
2238   up to max_block (but no more than BUFNR blocks at a time). This proved to improve
2239   mounting speed on self-rebuilding raid5 arrays at least.
2240   Right now it is only used from journal code. But later we might use it
2241   from other places.
2242   Note: Do not use journal_getblk/sb_getblk functions here! */
2243static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2244					   int bufsize, unsigned int max_block)
2245{
2246	struct buffer_head *bhlist[BUFNR];
2247	unsigned int blocks = BUFNR;
2248	struct buffer_head *bh;
2249	int i, j;
2250
2251	bh = __getblk(dev, block, bufsize);
2252	if (buffer_uptodate(bh))
2253		return (bh);
2254
2255	if (block + BUFNR > max_block) {
2256		blocks = max_block - block;
2257	}
2258	bhlist[0] = bh;
2259	j = 1;
2260	for (i = 1; i < blocks; i++) {
2261		bh = __getblk(dev, block + i, bufsize);
2262		if (buffer_uptodate(bh)) {
2263			brelse(bh);
2264			break;
2265		} else
2266			bhlist[j++] = bh;
2267	}
2268	ll_rw_block(READ, j, bhlist);
2269	for (i = 1; i < j; i++)
2270		brelse(bhlist[i]);
2271	bh = bhlist[0];
2272	wait_on_buffer(bh);
2273	if (buffer_uptodate(bh))
2274		return bh;
2275	brelse(bh);
2276	return NULL;
2277}
2278
2279/*
2280** read and replay the log
2281** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
2282** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
2283**
2284** After a crash, this starts with the next unflushed transaction, and replays until it finds one that is too old or invalid.
2285**
2286** On exit, it sets things up so the first transaction will work correctly.
2287*/
2288static int journal_read(struct super_block *p_s_sb)
2289{
2290	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2291	struct reiserfs_journal_desc *desc;
2292	unsigned long oldest_trans_id = 0;
2293	unsigned long oldest_invalid_trans_id = 0;
2294	time_t start;
2295	unsigned long oldest_start = 0;
2296	unsigned long cur_dblock = 0;
2297	unsigned long newest_mount_id = 9;
2298	struct buffer_head *d_bh;
2299	struct reiserfs_journal_header *jh;
2300	int valid_journal_header = 0;
2301	int replay_count = 0;
2302	int continue_replay = 1;
2303	int ret;
2304	char b[BDEVNAME_SIZE];
2305
2306	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2307	reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2308		      bdevname(journal->j_dev_bd, b));
2309	start = get_seconds();
2310
2311	/* step 1, read in the journal header block.  Check the transaction it says
2312	 ** is the first unflushed, and if that transaction is not valid,
2313	 ** replay is done
2314	 */
2315	journal->j_header_bh = journal_bread(p_s_sb,
2316					     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2317					     + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2318	if (!journal->j_header_bh) {
2319		return 1;
2320	}
2321	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2322	/* j_first_unflushed_offset is unsigned, so a >= 0 test would always be true */
2323	if (le32_to_cpu(jh->j_first_unflushed_offset) <
2324	    SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2325	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2326		oldest_start =
2327		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2328		    le32_to_cpu(jh->j_first_unflushed_offset);
2329		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2330		newest_mount_id = le32_to_cpu(jh->j_mount_id);
2331		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2332			       "journal-1153: found in "
2333			       "header: first_unflushed_offset %d, last_flushed_trans_id "
2334			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2335			       le32_to_cpu(jh->j_last_flush_trans_id));
2336		valid_journal_header = 1;
2337
2338		/* now, we try to read the first unflushed offset.  If it is not valid,
2339		 ** there is nothing more we can do, and it makes no sense to read
2340		 ** through the whole log.
2341		 */
2342		d_bh =
2343		    journal_bread(p_s_sb,
2344				  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2345				  le32_to_cpu(jh->j_first_unflushed_offset));
2346		ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2347		if (!ret) {
2348			continue_replay = 0;
2349		}
2350		brelse(d_bh);
2351		goto start_log_replay;
2352	}
2353
2354	if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2355		reiserfs_warning(p_s_sb,
2356				 "clm-2076: device is readonly, unable to replay log");
2357		return -1;
2358	}
2359
2360	/* ok, there are transactions that need to be replayed.  start with the first log block, find
2361	 ** all the valid transactions, and pick out the oldest.
2362	 */
2363	while (continue_replay
2364	       && cur_dblock <
2365	       (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2366		SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2367		/* Note that the block size of the primary fs device and the
2368		   journal device must be the same */
2369		d_bh =
2370		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
2371				    p_s_sb->s_blocksize,
2372				    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2373				    SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2374		ret =
2375		    journal_transaction_is_valid(p_s_sb, d_bh,
2376						 &oldest_invalid_trans_id,
2377						 &newest_mount_id);
2378		if (ret == 1) {
2379			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2380			if (oldest_start == 0) {	/* init all oldest_ values */
2381				oldest_trans_id = get_desc_trans_id(desc);
2382				oldest_start = d_bh->b_blocknr;
2383				newest_mount_id = get_desc_mount_id(desc);
2384				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2385					       "journal-1179: Setting "
2386					       "oldest_start to offset %llu, trans_id %lu",
2387					       oldest_start -
2388					       SB_ONDISK_JOURNAL_1st_BLOCK
2389					       (p_s_sb), oldest_trans_id);
2390			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
2391				/* one we just read was older */
2392				oldest_trans_id = get_desc_trans_id(desc);
2393				oldest_start = d_bh->b_blocknr;
2394				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2395					       "journal-1180: Resetting "
2396					       "oldest_start to offset %lu, trans_id %lu",
2397					       oldest_start -
2398					       SB_ONDISK_JOURNAL_1st_BLOCK
2399					       (p_s_sb), oldest_trans_id);
2400			}
2401			if (newest_mount_id < get_desc_mount_id(desc)) {
2402				newest_mount_id = get_desc_mount_id(desc);
2403				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2404					       "journal-1299: Setting "
2405					       "newest_mount_id to %d",
2406					       get_desc_mount_id(desc));
2407			}
2408			cur_dblock += get_desc_trans_len(desc) + 2;
2409		} else {
2410			cur_dblock++;
2411		}
2412		brelse(d_bh);
2413	}
2414
2415      start_log_replay:
2416	cur_dblock = oldest_start;
2417	if (oldest_trans_id) {
2418		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2419			       "journal-1206: Starting replay "
2420			       "from offset %llu, trans_id %lu",
2421			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2422			       oldest_trans_id);
2423
2424	}
2425	replay_count = 0;
2426	while (continue_replay && oldest_trans_id > 0) {
2427		ret =
2428		    journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2429					     oldest_trans_id, newest_mount_id);
2430		if (ret < 0) {
2431			return ret;
2432		} else if (ret != 0) {
2433			break;
2434		}
2435		cur_dblock =
2436		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2437		replay_count++;
2438		if (cur_dblock == oldest_start)
2439			break;
2440	}
2441
2442	if (oldest_trans_id == 0) {
2443		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2444			       "journal-1225: No valid " "transactions found");
2445	}
2446	/* j_start does not get set correctly if we don't replay any transactions.
2447	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2448	 ** copy the trans_id from the header
2449	 */
2450	if (valid_journal_header && replay_count == 0) {
2451		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2452		journal->j_trans_id =
2453		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2454		journal->j_last_flush_trans_id =
2455		    le32_to_cpu(jh->j_last_flush_trans_id);
2456		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2457	} else {
2458		journal->j_mount_id = newest_mount_id + 1;
2459	}
2460	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2461		       "newest_mount_id to %lu", journal->j_mount_id);
2462	journal->j_first_unflushed_offset = journal->j_start;
2463	if (replay_count > 0) {
2464		reiserfs_info(p_s_sb,
2465			      "replayed %d transactions in %lu seconds\n",
2466			      replay_count, get_seconds() - start);
2467	}
2468	if (!bdev_read_only(p_s_sb->s_bdev) &&
2469	    _update_journal_header_block(p_s_sb, journal->j_start,
2470					 journal->j_last_flush_trans_id)) {
2471		/* replay failed, caller must call free_journal_ram and abort
2472		 ** the mount
2473		 */
2474		return -1;
2475	}
2476	return 0;
2477}
2478
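/* allocate a new journal list and take the initial reference on it */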
2479static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2480{
2481	struct reiserfs_journal_list *jl;
2482	jl = kzalloc(sizeof(struct reiserfs_journal_list),
2483		     GFP_NOFS | __GFP_NOFAIL);
2484	INIT_LIST_HEAD(&jl->j_list);
2485	INIT_LIST_HEAD(&jl->j_working_list);
2486	INIT_LIST_HEAD(&jl->j_tail_bh_list);
2487	INIT_LIST_HEAD(&jl->j_bh_list);
2488	sema_init(&jl->j_commit_lock, 1);
2489	SB_JOURNAL(s)->j_num_lists++;
2490	get_journal_list(jl);
2491	return jl;
2492}
2493
2494static void journal_list_init(struct super_block *p_s_sb)
2495{
2496	SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2497}
2498
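/* close whatever handle journal_init_dev opened: the filp for a journal
** named by the "jdev" option, or the block device opened by devnum */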
2499static int release_journal_dev(struct super_block *super,
2500			       struct reiserfs_journal *journal)
2501{
2502	int result;
2503
2504	result = 0;
2505
2506	if (journal->j_dev_file != NULL) {
2507		result = filp_close(journal->j_dev_file, NULL);
2508		journal->j_dev_file = NULL;
2509		journal->j_dev_bd = NULL;
2510	} else if (journal->j_dev_bd != NULL) {
2511		result = blkdev_put(journal->j_dev_bd);
2512		journal->j_dev_bd = NULL;
2513	}
2514
2515	if (result != 0) {
2516		reiserfs_warning(super,
2517				 "sh-457: release_journal_dev: Cannot release journal device: %i",
2518				 result);
2519	}
2520	return result;
2521}
2522
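/* open the journal device.  With no "jdev" name we open the device number
** recorded in the super block (which may be the main fs device) via
** open_by_devnum; otherwise the named block device is opened via filp_open */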
2523static int journal_init_dev(struct super_block *super,
2524			    struct reiserfs_journal *journal,
2525			    const char *jdev_name)
2526{
2527	int result;
2528	dev_t jdev;
2529	int blkdev_mode = FMODE_READ | FMODE_WRITE;
2530	char b[BDEVNAME_SIZE];
2531
2532	result = 0;
2533
2534	journal->j_dev_bd = NULL;
2535	journal->j_dev_file = NULL;
2536	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2537	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2538
2539	if (bdev_read_only(super->s_bdev))
2540		blkdev_mode = FMODE_READ;
2541
2542	/* no "jdev" option given; open the journal device recorded in the super block */
2543	if ((!jdev_name || !jdev_name[0])) {
2544		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2545		if (IS_ERR(journal->j_dev_bd)) {
2546			result = PTR_ERR(journal->j_dev_bd);
2547			journal->j_dev_bd = NULL;
2548			reiserfs_warning(super, "sh-458: journal_init_dev: "
2549					 "cannot init journal device '%s': %i",
2550					 __bdevname(jdev, b), result);
2551			return result;
2552		} else if (jdev != super->s_dev)
2553			set_blocksize(journal->j_dev_bd, super->s_blocksize);
2554		return 0;
2555	}
2556
2557	journal->j_dev_file = filp_open(jdev_name, 0, 0);
2558	if (!IS_ERR(journal->j_dev_file)) {
2559		struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
2560		if (!S_ISBLK(jdev_inode->i_mode)) {
2561			reiserfs_warning(super, "journal_init_dev: '%s' is "
2562					 "not a block device", jdev_name);
2563			result = -ENOTBLK;
2564			release_journal_dev(super, journal);
2565		} else {
2566			/* ok */
2567			journal->j_dev_bd = I_BDEV(jdev_inode);
2568			set_blocksize(journal->j_dev_bd, super->s_blocksize);
2569			reiserfs_info(super,
2570				      "journal_init_dev: journal device: %s\n",
2571				      bdevname(journal->j_dev_bd, b));
2572		}
2573	} else {
2574		result = PTR_ERR(journal->j_dev_file);
2575		journal->j_dev_file = NULL;
2576		reiserfs_warning(super,
2577				 "journal_init_dev: Cannot open '%s': %i",
2578				 jdev_name, result);
2579	}
2580	return result;
2581}
2582
2583/*
2584** must be called once on fs mount.  calls journal_read for you
2585*/
2586int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2587		 int old_format, unsigned int commit_max_age)
2588{
2589	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2590	struct buffer_head *bhjh;
2591	struct reiserfs_super_block *rs;
2592	struct reiserfs_journal_header *jh;
2593	struct reiserfs_journal *journal;
2594	struct reiserfs_journal_list *jl;
2595	char b[BDEVNAME_SIZE];
2596
2597	journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2598	if (!journal) {
2599		reiserfs_warning(p_s_sb,
2600				 "journal-1256: unable to get memory for journal structure");
2601		return 1;
2602	}
2603	memset(journal, 0, sizeof(struct reiserfs_journal));
2604	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2605	INIT_LIST_HEAD(&journal->j_prealloc_list);
2606	INIT_LIST_HEAD(&journal->j_working_list);
2607	INIT_LIST_HEAD(&journal->j_journal_list);
2608	journal->j_persistent_trans = 0;
2609	if (reiserfs_allocate_list_bitmaps(p_s_sb,
2610					   journal->j_list_bitmap,
2611					   SB_BMAP_NR(p_s_sb)))
2612		goto free_and_return;
2613	allocate_bitmap_nodes(p_s_sb);
2614
2615	/* reserved for journal area support */
2616	SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2617						 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2618						 / p_s_sb->s_blocksize +
2619						 SB_BMAP_NR(p_s_sb) +
2620						 1 :
2621						 REISERFS_DISK_OFFSET_IN_BYTES /
2622						 p_s_sb->s_blocksize + 2);
2623
2624	/* Sanity check to see if the standard journal fits within the first bitmap
2625	   (relevant for small block sizes) */
2626	if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2627	    (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2628	     SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2629		reiserfs_warning(p_s_sb,
2630				 "journal-1393: journal does not fit for area "
2631				 "addressed by first of bitmap blocks. It starts at "
2632				 "%u and its size is %u. Block size %ld",
2633				 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2634				 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2635				 p_s_sb->s_blocksize);
2636		goto free_and_return;
2637	}
2638
2639	if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2640		reiserfs_warning(p_s_sb,
2641				 "sh-462: unable to initialize journal device");
2642		goto free_and_return;
2643	}
2644
2645	rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2646
2647	/* read journal header */
2648	bhjh = journal_bread(p_s_sb,
2649			     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2650			     SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2651	if (!bhjh) {
2652		reiserfs_warning(p_s_sb,
2653				 "sh-459: unable to read journal header");
2654		goto free_and_return;
2655	}
2656	jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2657
2658	/* make sure the journal matches the super block */
2659	if (is_reiserfs_jr(rs)
2660	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2661		sb_jp_journal_magic(rs))) {
2662		reiserfs_warning(p_s_sb,
2663				 "sh-460: journal header magic %x "
2664				 "(device %s) does not match to magic found in super "
2665				 "block %x", jh->jh_journal.jp_journal_magic,
2666				 bdevname(journal->j_dev_bd, b),
2667				 sb_jp_journal_magic(rs));
2668		brelse(bhjh);
2669		goto free_and_return;
2670	}
2671
2672	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2673	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2674	journal->j_max_commit_age =
2675	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2676	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2677
2678	if (journal->j_trans_max) {
2679		/* make sure these parameters are available, assign defaults if they are not */
2680		__u32 initial = journal->j_trans_max;
2681		__u32 ratio = 1;
2682
2683		if (p_s_sb->s_blocksize < 4096)
2684			ratio = 4096 / p_s_sb->s_blocksize;
2685
2686		if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2687		    JOURNAL_MIN_RATIO)
2688			journal->j_trans_max =
2689			    SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2690		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2691			journal->j_trans_max =
2692			    JOURNAL_TRANS_MAX_DEFAULT / ratio;
2693		if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2694			journal->j_trans_max =
2695			    JOURNAL_TRANS_MIN_DEFAULT / ratio;
2696
2697		if (journal->j_trans_max != initial)
2698			reiserfs_warning(p_s_sb,
2699					 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
2700					 initial, journal->j_trans_max);
2701
2702		journal->j_max_batch = journal->j_trans_max *
2703		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
2704	}
2705
2706	if (!journal->j_trans_max) {
2707		/* the file system was created by an old version of mkreiserfs,
2708		   so this field contains a zero value */
2709		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2710		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2711		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2712
2713		/* for blocksize >= 4096 the max transaction size is 1024.  For blocksize < 4096
2714		   the max transaction size is decreased proportionally */
2715		if (p_s_sb->s_blocksize < 4096) {
2716			journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
2717			journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
2718		}
2719	}
2720
2721	journal->j_default_max_commit_age = journal->j_max_commit_age;
2722
2723	if (commit_max_age != 0) {
2724		journal->j_max_commit_age = commit_max_age;
2725		journal->j_max_trans_age = commit_max_age;
2726	}
2727
2728	reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2729		      "journal first block %u, max trans len %u, max batch %u, "
2730		      "max commit age %u, max trans age %u\n",
2731		      bdevname(journal->j_dev_bd, b),
2732		      SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2733		      SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2734		      journal->j_trans_max,
2735		      journal->j_max_batch,
2736		      journal->j_max_commit_age, journal->j_max_trans_age);
2737
2738	brelse(bhjh);
2739
2740	journal->j_list_bitmap_index = 0;
2741	journal_list_init(p_s_sb);
2742
2743	memset(journal->j_list_hash_table, 0,
2744	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2745
2746	INIT_LIST_HEAD(&journal->j_dirty_buffers);
2747	spin_lock_init(&journal->j_dirty_buffers_lock);
2748
2749	journal->j_start = 0;
2750	journal->j_len = 0;
2751	journal->j_len_alloc = 0;
2752	atomic_set(&(journal->j_wcount), 0);
2753	atomic_set(&(journal->j_async_throttle), 0);
2754	journal->j_bcount = 0;
2755	journal->j_trans_start_time = 0;
2756	journal->j_last = NULL;
2757	journal->j_first = NULL;
2758	init_waitqueue_head(&(journal->j_join_wait));
2759	sema_init(&journal->j_lock, 1);
2760	sema_init(&journal->j_flush_sem, 1);
2761
2762	journal->j_trans_id = 10;
2763	journal->j_mount_id = 10;
2764	journal->j_state = 0;
2765	atomic_set(&(journal->j_jlock), 0);
2766	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2767	journal->j_cnode_free_orig = journal->j_cnode_free_list;
2768	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2769	journal->j_cnode_used = 0;
2770	journal->j_must_wait = 0;
2771
2772	if (journal->j_cnode_free == 0) {
2773		reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
2774				 "allocation failed (%ld bytes). The journal "
2775				 "is too large for the memory available; a "
2776				 "smaller journal is required.",
2777				 sizeof(struct reiserfs_journal_cnode) * num_cnodes);
2778		goto free_and_return;
2779	}
2780
2781	init_journal_hash(p_s_sb);
2782	jl = journal->j_current_jl;
2783	jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2784	if (!jl->j_list_bitmap) {
2785		reiserfs_warning(p_s_sb,
2786				 "journal-2005, get_list_bitmap failed for journal list 0");
2787		goto free_and_return;
2788	}
2789	if (journal_read(p_s_sb) < 0) {
2790		reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2791		goto free_and_return;
2792	}
2793
2794	reiserfs_mounted_fs_count++;
2795	if (reiserfs_mounted_fs_count <= 1)
2796		commit_wq = create_workqueue("reiserfs");
2797
2798	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
2799	return 0;
2800      free_and_return:
2801	free_journal_ram(p_s_sb);
2802	return 1;
2803}
2804
2805/*
2806** test for a polite end of the current transaction.  Used by file_write, and should
2807** be used by delete to make sure they don't write more than can fit inside a single
2808** transaction
2809*/
2810int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2811				   int new_alloc)
2812{
2813	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2814	time_t now = get_seconds();
2815	/* cannot restart while nested */
2816	BUG_ON(!th->t_trans_id);
2817	if (th->t_refcount > 1)
2818		return 0;
2819	if (journal->j_must_wait > 0 ||
2820	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2821	    atomic_read(&(journal->j_jlock)) ||
2822	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2823	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
2824		return 1;
2825	}
2826	return 0;
2827}
2828
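/* A sketch of the intended calling pattern (illustrative only -- the real
** restart helpers live outside this file): a long-running writer checks
** between units of work and politely restarts the transaction when asked:
**
**	if (journal_transaction_should_end(th, blocks_per_unit)) {
**		journal_end(th, sb, blocks_per_unit);
**		journal_begin(th, sb, blocks_per_unit);
**	}
*/
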
2829/* this must be called inside a transaction, and requires the
2830** kernel_lock to be held
2831*/
2832void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2833{
2834	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2835	BUG_ON(!th->t_trans_id);
2836	journal->j_must_wait = 1;
2837	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2838	return;
2839}
2840
2841/* this must be called without a transaction started, and does not
2842** require BKL
2843*/
2844void reiserfs_allow_writes(struct super_block *s)
2845{
2846	struct reiserfs_journal *journal = SB_JOURNAL(s);
2847	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2848	wake_up(&journal->j_join_wait);
2849}
2850
2851/* this must be called without a transaction started, and does not
2852** require BKL
2853*/
2854void reiserfs_wait_on_write_block(struct super_block *s)
2855{
2856	struct reiserfs_journal *journal = SB_JOURNAL(s);
2857	wait_event(journal->j_join_wait,
2858		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2859}
2860
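/* mark writers as queued and sleep once on j_join_wait; the wakeup
** comes from wake_queued_writers() below */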
2861static void queue_log_writer(struct super_block *s)
2862{
2863	wait_queue_t wait;
2864	struct reiserfs_journal *journal = SB_JOURNAL(s);
2865	set_bit(J_WRITERS_QUEUED, &journal->j_state);
2866
2867	/*
2868	 * we don't want to use wait_event here because
2869	 * we only want to wait once.
2870	 */
2871	init_waitqueue_entry(&wait, current);
2872	add_wait_queue(&journal->j_join_wait, &wait);
2873	set_current_state(TASK_UNINTERRUPTIBLE);
2874	if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2875		schedule();
2876	current->state = TASK_RUNNING;
2877	remove_wait_queue(&journal->j_join_wait, &wait);
2878}
2879
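/* wake anyone queue_log_writer() put to sleep, if writers are queued */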
2880static void wake_queued_writers(struct super_block *s)
2881{
2882	struct reiserfs_journal *journal = SB_JOURNAL(s);
2883	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2884		wake_up(&journal->j_join_wait);
2885}
2886
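/* give the current transaction a chance to keep growing: sleep in short
** intervals while writers are still active, returning once the transaction
** has ended or its batch count (j_bcount) stops increasing */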
2887static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2888{
2889	struct reiserfs_journal *journal = SB_JOURNAL(sb);
2890	unsigned long bcount = journal->j_bcount;
2891	while (1) {
2892		schedule_timeout_uninterruptible(1);
2893		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2894		while ((atomic_read(&journal->j_wcount) > 0 ||
2895			atomic_read(&journal->j_jlock)) &&
2896		       journal->j_trans_id == trans_id) {
2897			queue_log_writer(sb);
2898		}
2899		if (journal->j_trans_id != trans_id)
2900			break;
2901		if (bcount == journal->j_bcount)
2902			break;
2903		bcount = journal->j_bcount;
2904	}
2905}
2906
2907/* join == true if you must join an existing transaction.
2908** join == false if you can deal with waiting for others to finish
2909**
2910** this will block until the transaction is joinable.  send the number of blocks you
2911** expect to use in nblocks.
2912*/
2913static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2914			      struct super_block *p_s_sb, unsigned long nblocks,
2915			      int join)
2916{
2917	time_t now = get_seconds();
2918	int old_trans_id;
2919	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2920	struct reiserfs_transaction_handle myth;
2921	int sched_count = 0;
2922	int retval;
2923
2924	reiserfs_check_lock_depth(p_s_sb, "journal_begin");
2925	if (nblocks > journal->j_trans_max)
2926		BUG();
2927
2928	PROC_INFO_INC(p_s_sb, journal.journal_being);
2929	/* set here for journal_join */
2930	th->t_refcount = 1;
2931	th->t_super = p_s_sb;
2932
2933      relock:
2934	lock_journal(p_s_sb);
2935	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
2936		unlock_journal(p_s_sb);
2937		retval = journal->j_errno;
2938		goto out_fail;
2939	}
2940	journal->j_bcount++;
2941
2942	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2943		unlock_journal(p_s_sb);
2944		reiserfs_wait_on_write_block(p_s_sb);
2945		PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
2946		goto relock;
2947	}
2948	now = get_seconds();
2949
2950	/* if there is no room in the journal OR
2951	 ** if this transaction is too old and we weren't called joinable, wait for it to finish before beginning.
2952	 ** we don't sleep if there aren't other writers
2953	 */
2954
2955	if ((!join && journal->j_must_wait > 0) ||
2956	    (!join
2957	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
2958	    || (!join && atomic_read(&journal->j_wcount) > 0
2959		&& journal->j_trans_start_time > 0
2960		&& (now - journal->j_trans_start_time) >
2961		journal->j_max_trans_age) || (!join
2962					      && atomic_read(&journal->j_jlock))
2963	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
2964
2965		old_trans_id = journal->j_trans_id;
2966		unlock_journal(p_s_sb);	/* allow others to finish this transaction */
2967
2968		if (!join && (journal->j_len_alloc + nblocks + 2) >=
2969		    journal->j_max_batch &&
2970		    ((journal->j_len + nblocks + 2) * 100) <
2971		    (journal->j_len_alloc * 75)) {
2972			if (atomic_read(&journal->j_wcount) > 10) {
2973				sched_count++;
2974				queue_log_writer(p_s_sb);
2975				goto relock;
2976			}
2977		}
2978		/* don't mess with joining the transaction if all we have to do is
2979		 * wait for someone else to do a commit
2980		 */
2981		if (atomic_read(&journal->j_jlock)) {
2982			while (journal->j_trans_id == old_trans_id &&
2983			       atomic_read(&journal->j_jlock)) {
2984				queue_log_writer(p_s_sb);
2985			}
2986			goto relock;
2987		}
2988		retval = journal_join(&myth, p_s_sb, 1);
2989		if (retval)
2990			goto out_fail;
2991
2992		/* someone might have ended the transaction while we joined */
2993		if (old_trans_id != journal->j_trans_id) {
2994			retval = do_journal_end(&myth, p_s_sb, 1, 0);
2995		} else {
2996			retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
2997		}
2998
2999		if (retval)
3000			goto out_fail;
3001
3002		PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
3003		goto relock;
3004	}
3005	/* we are the first writer, set trans_id */
3006	if (journal->j_trans_start_time == 0) {
3007		journal->j_trans_start_time = get_seconds();
3008	}
3009	atomic_inc(&(journal->j_wcount));
3010	journal->j_len_alloc += nblocks;
3011	th->t_blocks_logged = 0;
3012	th->t_blocks_allocated = nblocks;
3013	th->t_trans_id = journal->j_trans_id;
3014	unlock_journal(p_s_sb);
3015	INIT_LIST_HEAD(&th->t_list);
3016	get_fs_excl();
3017	return 0;
3018
3019      out_fail:
3020	memset(th, 0, sizeof(*th));
3021	/* Re-set th->t_super, so we can properly keep track of how many
3022	 * persistent transactions there are. We need to do this so if this
3023	 * call is part of a failed restart_transaction, we can free it later */
3024	th->t_super = p_s_sb;
3025	return retval;
3026}
3027
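/* begin a transaction whose handle is kmalloc'd rather than kept on the
** caller's stack, so it can outlive the current call chain.  Nesting into
** a running transaction just bumps the existing handle's refcount.  A
** sketch of typical use (error handling of the journaled work elided):
**
**	struct reiserfs_transaction_handle *th;
**	th = reiserfs_persistent_transaction(sb, 10);
**	if (!th)
**		return -ENOMEM;
**	... journal_mark_dirty(th, sb, bh) ...
**	reiserfs_end_persistent_transaction(th);
*/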
3028struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3029								    super_block
3030								    *s,
3031								    int nblocks)
3032{
3033	int ret;
3034	struct reiserfs_transaction_handle *th;
3035
3036	/* if we're nesting into an existing transaction, it will be
3037	 ** persistent on its own
3038	 */
3039	if (reiserfs_transaction_running(s)) {
3040		th = current->journal_info;
3041		th->t_refcount++;
3042		if (th->t_refcount < 2) {
3043			BUG();
3044		}
3045		return th;
3046	}
3047	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3048	if (!th)
3049		return NULL;
3050	ret = journal_begin(th, s, nblocks);
3051	if (ret) {
3052		kfree(th);
3053		return NULL;
3054	}
3055
3056	SB_JOURNAL(s)->j_persistent_trans++;
3057	return th;
3058}
3059
3060int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3061{
3062	struct super_block *s = th->t_super;
3063	int ret = 0;
3064	if (th->t_trans_id)
3065		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3066	else
3067		ret = -EIO;
3068	if (th->t_refcount == 0) {
3069		SB_JOURNAL(s)->j_persistent_trans--;
3070		kfree(th);
3071	}
3072	return ret;
3073}
3074
3075static int journal_join(struct reiserfs_transaction_handle *th,
3076			struct super_block *p_s_sb, unsigned long nblocks)
3077{
3078	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3079
3080	/* this keeps do_journal_end from NULLing out the current->journal_info
3081	 ** pointer
3082	 */
3083	th->t_handle_save = cur_th;
3084	if (cur_th && cur_th->t_refcount > 1) {
3085		BUG();
3086	}
3087	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3088}
3089
3090int journal_join_abort(struct reiserfs_transaction_handle *th,
3091		       struct super_block *p_s_sb, unsigned long nblocks)
3092{
3093	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3094
3095	/* this keeps do_journal_end from NULLing out the current->journal_info
3096	 ** pointer
3097	 */
3098	th->t_handle_save = cur_th;
3099	if (cur_th && cur_th->t_refcount > 1) {
3100		BUG();
3101	}
3102	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3103}
3104
3105int journal_begin(struct reiserfs_transaction_handle *th,
3106		  struct super_block *p_s_sb, unsigned long nblocks)
3107{
3108	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3109	int ret;
3110
3111	th->t_handle_save = NULL;
3112	if (cur_th) {
3113		/* we are nesting into the current transaction */
3114		if (cur_th->t_super == p_s_sb) {
3115			BUG_ON(!cur_th->t_refcount);
3116			cur_th->t_refcount++;
3117			memcpy(th, cur_th, sizeof(*th));
3118			if (th->t_refcount <= 1)
3119				reiserfs_warning(p_s_sb,
3120						 "BAD: refcount <= 1, but journal_info != 0");
3121			return 0;
3122		} else {
3123			/* we've ended up with a handle from a different filesystem.
3124			 ** save it and restore on journal_end.  This should never
3125			 ** really happen...
3126			 */
3127			reiserfs_warning(p_s_sb,
3128					 "clm-2100: nesting into a different FS");
3129			th->t_handle_save = current->journal_info;
3130			current->journal_info = th;
3131		}
3132	} else {
3133		current->journal_info = th;
3134	}
3135	ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
3136	if (current->journal_info != th)
3137		BUG();
3138
3139	/* I guess this boils down to being the reciprocal of clm-2100 above.
3140	 * If do_journal_begin_r fails, we need to put it back, since journal_end
3141	 * won't be called to do it. */
3142	if (ret)
3143		current->journal_info = th->t_handle_save;
3144	else
3145		BUG_ON(!th->t_refcount);
3146
3147	return ret;
3148}
3149
3150/*
3151** puts bh into the current transaction.  If it was already there, removes the
3152** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
3153**
3154** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
3155** transaction is committed.
3156**
3157** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3158*/
3159int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3160		       struct super_block *p_s_sb, struct buffer_head *bh)
3161{
3162	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3163	struct reiserfs_journal_cnode *cn = NULL;
3164	int count_already_incd = 0;
3165	int prepared = 0;
3166	BUG_ON(!th->t_trans_id);
3167
3168	PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3169	if (th->t_trans_id != journal->j_trans_id) {
3170		reiserfs_panic(th->t_super,
3171			       "journal-1577: handle trans id %ld != current trans id %ld\n",
3172			       th->t_trans_id, journal->j_trans_id);
3173	}
3174
3175	p_s_sb->s_dirt = 1;
3176
3177	prepared = test_clear_buffer_journal_prepared(bh);
3178	clear_buffer_journal_restore_dirty(bh);
3179	/* already in this transaction, we are done */
3180	if (buffer_journaled(bh)) {
3181		PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3182		return 0;
3183	}
3184
3185	/* this must be turned into a panic instead of a warning.  We can't allow
3186	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3187	 ** could get to disk too early.  NOT GOOD.
3188	 */
3189	if (!prepared || buffer_dirty(bh)) {
3190		reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3191				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3192				 (unsigned long long)bh->b_blocknr,
3193				 prepared ? ' ' : '!',
3194				 buffer_locked(bh) ? ' ' : '!',
3195				 buffer_dirty(bh) ? ' ' : '!',
3196				 buffer_journal_dirty(bh) ? ' ' : '!');
3197	}
3198
3199	if (atomic_read(&(journal->j_wcount)) <= 0) {
3200		reiserfs_warning(p_s_sb,
3201				 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3202				 atomic_read(&(journal->j_wcount)));
3203		return 1;
3204	}
3205	/* this error means I've screwed up, and we've overflowed the transaction.
3206	 ** Nothing can be done here, except make the FS readonly or panic.
3207	 */
3208	if (journal->j_len >= journal->j_trans_max) {
3209		reiserfs_panic(th->t_super,
3210			       "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3211			       journal->j_len);
3212	}
3213
3214	if (buffer_journal_dirty(bh)) {
3215		count_already_incd = 1;
3216		PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3217		clear_buffer_journal_dirty(bh);
3218	}
3219
3220	if (journal->j_len > journal->j_len_alloc) {
3221		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3222	}
3223
3224	set_buffer_journaled(bh);
3225
3226	/* now put this guy on the end */
3227	if (!cn) {
3228		cn = get_cnode(p_s_sb);
3229		if (!cn) {
3230			reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3231		}
3232
3233		if (th->t_blocks_logged == th->t_blocks_allocated) {
3234			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3235			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3236		}
3237		th->t_blocks_logged++;
3238		journal->j_len++;
3239
3240		cn->bh = bh;
3241		cn->blocknr = bh->b_blocknr;
3242		cn->sb = p_s_sb;
3243		cn->jlist = NULL;
3244		insert_journal_hash(journal->j_hash_table, cn);
3245		if (!count_already_incd) {
3246			get_bh(bh);
3247		}
3248	}
3249	cn->next = NULL;
3250	cn->prev = journal->j_last;
3251	cn->bh = bh;
3252	if (journal->j_last) {
3253		journal->j_last->next = cn;
3254		journal->j_last = cn;
3255	} else {
3256		journal->j_first = cn;
3257		journal->j_last = cn;
3258	}
3259	return 0;
3260}
3261
3262int journal_end(struct reiserfs_transaction_handle *th,
3263		struct super_block *p_s_sb, unsigned long nblocks)
3264{
3265	if (!current->journal_info && th->t_refcount > 1)
3266		reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3267				 th->t_refcount);
3268
3269	if (!th->t_trans_id) {
3270		WARN_ON(1);
3271		return -EIO;
3272	}
3273
3274	th->t_refcount--;
3275	if (th->t_refcount > 0) {
3276		struct reiserfs_transaction_handle *cur_th =
3277		    current->journal_info;
3278
3279		/* we aren't allowed to close a nested transaction on a different
3280		 ** filesystem from the one in the task struct
3281		 */
3282		if (cur_th->t_super != th->t_super)
3283			BUG();
3284
3285		if (th != cur_th) {
3286			memcpy(current->journal_info, th, sizeof(*th));
3287			th->t_trans_id = 0;
3288		}
3289		return 0;
3290	} else {
3291		return do_journal_end(th, p_s_sb, nblocks, 0);
3292	}
3293}
3294
3295/* removes a buffer from the current transaction, releasing and decrementing any counters.
3296** also files the removed buffer directly onto the clean list
3297**
3298** called by journal_mark_freed when a block has been deleted
3299**
3300** returns 1 if it cleaned and released the buffer. 0 otherwise
3301*/
3302static int remove_from_transaction(struct super_block *p_s_sb,
3303				   b_blocknr_t blocknr, int already_cleaned)
3304{
3305	struct buffer_head *bh;
3306	struct reiserfs_journal_cnode *cn;
3307	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3308	int ret = 0;
3309
3310	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3311	if (!cn || !cn->bh) {
3312		return ret;
3313	}
3314	bh = cn->bh;
3315	if (cn->prev) {
3316		cn->prev->next = cn->next;
3317	}
3318	if (cn->next) {
3319		cn->next->prev = cn->prev;
3320	}
3321	if (cn == journal->j_first) {
3322		journal->j_first = cn->next;
3323	}
3324	if (cn == journal->j_last) {
3325		journal->j_last = cn->prev;
3326	}
3327	if (bh)
3328		remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3329				    bh->b_blocknr, 0);
3330	clear_buffer_journaled(bh);	/* don't log this one */
3331
3332	if (!already_cleaned) {
3333		clear_buffer_journal_dirty(bh);
3334		clear_buffer_dirty(bh);
3335		clear_buffer_journal_test(bh);
3336		put_bh(bh);
3337		if (atomic_read(&(bh->b_count)) < 0) {
3338			reiserfs_warning(p_s_sb,
3339					 "journal-1752: remove from trans, b_count < 0");
3340		}
3341		ret = 1;
3342	}
3343	journal->j_len--;
3344	journal->j_len_alloc--;
3345	free_cnode(p_s_sb, cn);
3346	return ret;
3347}
3348
3349/*
3350** a cnode in a journal list can only be dirtied if all the
3351** transactions that include it are committed to disk.
3352** this checks through each transaction, and returns 1 if you are allowed to dirty,
3353** and 0 if you aren't
3354**
3355** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3356** blocks for a given transaction on disk
3357**
3358*/
3359static int can_dirty(struct reiserfs_journal_cnode *cn)
3360{
3361	struct super_block *sb = cn->sb;
3362	b_blocknr_t blocknr = cn->blocknr;
3363	struct reiserfs_journal_cnode *cur = cn->hprev;
3364	int can_dirty = 1;
3365
3366	/* first test hprev.  These are all newer than cn, so any node here
3367	 ** with the same block number and dev means this node can't be sent
3368	 ** to disk right now.
3369	 */
3370	while (cur && can_dirty) {
3371		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3372		    cur->blocknr == blocknr) {
3373			can_dirty = 0;
3374		}
3375		cur = cur->hprev;
3376	}
3377	/* then test hnext.  These are all older than cn.  As long as they
3378	 ** are committed to the log, it is safe to write cn to disk
3379	 */
3380	cur = cn->hnext;
3381	while (cur && can_dirty) {
3382		if (cur->jlist && cur->jlist->j_len > 0 &&
3383		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3384		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3385			can_dirty = 0;
3386		}
3387		cur = cur->hnext;
3388	}
3389	return can_dirty;
3390}
3391
3392/* syncs the commit blocks, but does not force the real buffers to disk
3393** will wait until the current transaction is done/committed before returning
3394*/
3395int journal_end_sync(struct reiserfs_transaction_handle *th,
3396		     struct super_block *p_s_sb, unsigned long nblocks)
3397{
3398	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3399
3400	BUG_ON(!th->t_trans_id);
3401	/* you can't sync while nested -- that is very, very bad */
3402	if (th->t_refcount > 1) {
3403		BUG();
3404	}
3405	if (journal->j_len == 0) {
3406		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3407					     1);
3408		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3409	}
3410	return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
3411}
3412
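/* A sketch of an fsync-style caller built from the primitives above
** (illustrative only; the real fsync path lives outside this file):
**
**	struct reiserfs_transaction_handle th;
**	if (!journal_begin(&th, sb, 1))
**		journal_end_sync(&th, sb, 1);
*/
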
3413/*
3414** writeback the pending async commits to disk
3415*/
3416static void flush_async_commits(void *p)
3417{
3418	struct super_block *p_s_sb = p;
3419	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3420	struct reiserfs_journal_list *jl;
3421	struct list_head *entry;
3422
3423	lock_kernel();
3424	if (!list_empty(&journal->j_journal_list)) {
3425		/* last entry is the youngest, commit it and you get everything */
3426		entry = journal->j_journal_list.prev;
3427		jl = JOURNAL_LIST_ENTRY(entry);
3428		flush_commit_list(p_s_sb, jl, 1);
3429	}
3430	unlock_kernel();
3431	/*
3432	 * this is a little racy, but there's no harm in missing
3433	 * the filemap_fdatawrite
3434	 */
3435	if (!atomic_read(&journal->j_async_throttle)
3436	    && !reiserfs_is_journal_aborted(journal)) {
3437		atomic_inc(&journal->j_async_throttle);
3438		filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
3439		atomic_dec(&journal->j_async_throttle);
3440	}
3441}
3442
3443/*
3444** flushes any old transactions to disk
3445** ends the current transaction if it is too old
3446*/
3447int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3448{
3449	time_t now;
3450	struct reiserfs_transaction_handle th;
3451	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3452
3453	now = get_seconds();
3454	/* safety check so we don't flush while we are replaying the log during
3455	 * mount
3456	 */
3457	if (list_empty(&journal->j_journal_list)) {
3458		return 0;
3459	}
3460
3461	/* check the current transaction.  If there are no writers, and it is
3462	 * too old, finish it, and force the commit blocks to disk
3463	 */
3464	if (atomic_read(&journal->j_wcount) <= 0 &&
3465	    journal->j_trans_start_time > 0 &&
3466	    journal->j_len > 0 &&
3467	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3468		if (!journal_join(&th, p_s_sb, 1)) {
3469			reiserfs_prepare_for_journal(p_s_sb,
3470						     SB_BUFFER_WITH_SB(p_s_sb),
3471						     1);
3472			journal_mark_dirty(&th, p_s_sb,
3473					   SB_BUFFER_WITH_SB(p_s_sb));
3474
3475			/* we're only called from kreiserfsd, so it makes no sense to queue
3476			 ** an async commit for kreiserfsd to do later
3477			 */
3478			do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3479		}
3480	}
3481	return p_s_sb->s_dirt;
3482}
3483
3484/*
3485** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3486**
3487** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3488** the writers are done.  By the time it wakes up, the transaction it was called with has already ended, so it just
3489** flushes the commit list and returns 0.
3490**
3491** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
3492**
3493** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3494*/
3495static int check_journal_end(struct reiserfs_transaction_handle *th,
3496			     struct super_block *p_s_sb, unsigned long nblocks,
3497			     int flags)
3498{
3499
3500	time_t now;
3501	int flush = flags & FLUSH_ALL;
3502	int commit_now = flags & COMMIT_NOW;
3503	int wait_on_commit = flags & WAIT;
3504	struct reiserfs_journal_list *jl;
3505	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3506
3507	BUG_ON(!th->t_trans_id);
3508
3509	if (th->t_trans_id != journal->j_trans_id) {
3510		reiserfs_panic(th->t_super,
3511			       "journal-1577: handle trans id %ld != current trans id %ld\n",
3512			       th->t_trans_id, journal->j_trans_id);
3513	}
3514
3515	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3516	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
3517		atomic_dec(&(journal->j_wcount));
3518	}
3519
3520	/* BUG: deal with the case where j_len is 0 but blocks freed earlier still
3521	 ** need to be released.  The next transaction that actually writes something
3522	 ** will handle it, but it should really be taken care of in this trans
3523	 */
3524	if (journal->j_len == 0) {
3525		BUG();
3526	}
3527	/* if wcount > 0, and we are called with flush or commit_now,
3528	 ** we wait on j_join_wait.  We will wake up when the last writer has
3529	 ** finished the transaction, and started it on its way to the disk.
3530	 ** Then, we flush the commit or journal list, and just return 0
3531	 ** because the rest of journal end was already done for this transaction.
3532	 */
3533	if (atomic_read(&(journal->j_wcount)) > 0) {
3534		if (flush || commit_now) {
3535			unsigned trans_id;
3536
3537			jl = journal->j_current_jl;
3538			trans_id = jl->j_trans_id;
3539			if (wait_on_commit)
3540				jl->j_state |= LIST_COMMIT_PENDING;
3541			atomic_set(&(journal->j_jlock), 1);
3542			if (flush) {
3543				journal->j_next_full_flush = 1;
3544			}
3545			unlock_journal(p_s_sb);
3546
3547			/* sleep while the current transaction is still j_jlocked */
3548			while (journal->j_trans_id == trans_id) {
3549				if (atomic_read(&journal->j_jlock)) {
3550					queue_log_writer(p_s_sb);
3551				} else {
3552					lock_journal(p_s_sb);
3553					if (journal->j_trans_id == trans_id) {
3554						atomic_set(&(journal->j_jlock),
3555							   1);
3556					}
3557					unlock_journal(p_s_sb);
3558				}
3559			}
3560			if (journal->j_trans_id == trans_id) {
3561				BUG();
3562			}
3563			if (commit_now
3564			    && journal_list_still_alive(p_s_sb, trans_id)
3565			    && wait_on_commit) {
3566				flush_commit_list(p_s_sb, jl, 1);
3567			}
3568			return 0;
3569		}
3570		unlock_journal(p_s_sb);
3571		return 0;
3572	}
3573
3574	/* deal with old transactions where we are the last writers */
3575	now = get_seconds();
3576	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3577		commit_now = 1;
3578		journal->j_next_async_flush = 1;
3579	}
3580	/* don't batch when someone is waiting on j_join_wait */
3581	/* don't batch when syncing the commit or flushing the whole trans */
3582	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3583	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3584	    && journal->j_len_alloc < journal->j_max_batch
3585	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3586		journal->j_bcount++;
3587		unlock_journal(p_s_sb);
3588		return 0;
3589	}
3590
3591	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3592		reiserfs_panic(p_s_sb,
3593			       "journal-003: journal_end: j_start (%ld) is too high\n",
3594			       journal->j_start);
3595	}
3596	return 1;
3597}
3598
3599/*
3600** Does all the work that makes deleting blocks safe.
3601** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer head and move on.
3602**
3603** otherwise:
3604** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
3605** before this transaction has finished.
3606**
3607** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
3608** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
3609** the block can't be reallocated yet.
3610**
3611** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3612*/
3613int journal_mark_freed(struct reiserfs_transaction_handle *th,
3614		       struct super_block *p_s_sb, b_blocknr_t blocknr)
3615{
3616	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3617	struct reiserfs_journal_cnode *cn = NULL;
3618	struct buffer_head *bh = NULL;
3619	struct reiserfs_list_bitmap *jb = NULL;
3620	int cleaned = 0;
3621	BUG_ON(!th->t_trans_id);
3622
3623	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3624	if (cn && cn->bh) {
3625		bh = cn->bh;
3626		get_bh(bh);
3627	}
3628	/* if it is journal new, we just remove it from this transaction */
3629	if (bh && buffer_journal_new(bh)) {
3630		clear_buffer_journal_new(bh);
3631		clear_prepared_bits(bh);
3632		reiserfs_clean_and_file_buffer(bh);
3633		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3634	} else {
3635		/* set the bit for this block in the journal bitmap for this transaction */
3636		jb = journal->j_current_jl->j_list_bitmap;
3637		if (!jb) {
3638			reiserfs_panic(p_s_sb,
3639				       "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3640		}
3641		set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3642
3643		/* Note, the entire while loop is not allowed to schedule.  */
3644
3645		if (bh) {
3646			clear_prepared_bits(bh);
3647			reiserfs_clean_and_file_buffer(bh);
3648		}
3649		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3650
3651		/* find all older transactions with this block, make sure they don't try to write it out */
3652		cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3653					  blocknr);
3654		while (cn) {
3655			if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3656				set_bit(BLOCK_FREED, &cn->state);
3657				if (cn->bh) {
3658					if (!cleaned) {
3659						/* remove_from_transaction will brelse the buffer if it was
3660						 ** in the current trans
3661						 */
3662						clear_buffer_journal_dirty(cn->bh);
3664						clear_buffer_dirty(cn->bh);
3665						clear_buffer_journal_test(cn->bh);
3667						cleaned = 1;
3668						put_bh(cn->bh);
3669						if (atomic_read(&(cn->bh->b_count)) < 0) {
3671							reiserfs_warning(p_s_sb,
3672									 "journal-2138: cn->bh->b_count < 0");
3673						}
3674					}
3675					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
3676						atomic_dec(&(cn->jlist->j_nonzerolen));
3679					}
3680					cn->bh = NULL;
3681				}
3682			}
3683			cn = cn->hnext;
3684		}
3685	}
3686
3687	if (bh) {
3688		put_bh(bh);	/* get_hash grabs the buffer */
3689		if (atomic_read(&(bh->b_count)) < 0) {
3690			reiserfs_warning(p_s_sb,
3691					 "journal-2165: bh->b_count < 0");
3692		}
3693	}
3694	return 0;
3695}
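
/*
 * Worked example of the scheme above (hypothetical block numbers): suppose
 * block B was logged in transaction 5 and is freed by the current
 * transaction 7.  B's cnode in the trans 5 list gets BLOCK_FREED and a NULL
 * bh, so flushing trans 5 cannot copy stale data over B's real location.
 * The bit set in trans 7's list bitmap, plus the cnode left in
 * j_list_hash_table, keeps B from being reallocated until those lists are
 * flushed.
 */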
3696
3697void reiserfs_update_inode_transaction(struct inode *inode)
3698{
3699	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3700	REISERFS_I(inode)->i_jl = journal->j_current_jl;
3701	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3702}
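
/*
 * This records which transaction last touched the inode, so that
 * reiserfs_commit_for_inode below can commit just the journal list holding
 * the inode's changes instead of forcing the entire log.
 */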
3703
3704/*
3705 * returns -1 on error, 0 if no commits/barriers were done and 1
3706 * if a transaction was actually committed and the barrier was done
3707 */
3708static int __commit_trans_jl(struct inode *inode, unsigned long id,
3709			     struct reiserfs_journal_list *jl)
3710{
3711	struct reiserfs_transaction_handle th;
3712	struct super_block *sb = inode->i_sb;
3713	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3714	int ret = 0;
3715
3716	/* is it from the current transaction, or from an unknown transaction? */
3717	if (id == journal->j_trans_id) {
3718		jl = journal->j_current_jl;
3719		/* try to let other writers come in and grow this transaction */
3720		let_transaction_grow(sb, id);
3721		if (journal->j_trans_id != id) {
3722			goto flush_commit_only;
3723		}
3724
3725		ret = journal_begin(&th, sb, 1);
3726		if (ret)
3727			return ret;
3728
3729		/* someone might have ended this transaction while we joined */
3730		if (journal->j_trans_id != id) {
3731			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3732						     1);
3733			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3734			ret = journal_end(&th, sb, 1);
3735			goto flush_commit_only;
3736		}
3737
3738		ret = journal_end_sync(&th, sb, 1);
3739		if (!ret)
3740			ret = 1;
3741
3742	} else {
3743		/* this gets tricky; we have to make sure the journal list in
3744		 * the inode still exists.  We know the list is still around
3745		 * if we've got a larger transaction id than the oldest list
3746		 */
3747	      flush_commit_only:
3748		if (journal_list_still_alive(inode->i_sb, id)) {
3749			/*
3750			 * we only set ret to 1 when we know for sure
3751			 * the barrier hasn't been started yet on the commit
3752			 * block.
3753			 */
3754			if (atomic_read(&jl->j_commit_left) > 1)
3755				ret = 1;
3756			flush_commit_list(sb, jl, 1);
3757			if (journal->j_errno)
3758				ret = journal->j_errno;
3759		}
3760	}
3761	/* otherwise the list is gone, and long since committed */
3762	return ret;
3763}
3764
3765int reiserfs_commit_for_inode(struct inode *inode)
3766{
3767	unsigned long id = REISERFS_I(inode)->i_trans_id;
3768	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3769
3770	/* for the whole inode, assume an unset id means it was changed in
3771	 * the current transaction.  That is the more conservative assumption
3772	 */
3773	if (!id || !jl) {
3774		reiserfs_update_inode_transaction(inode);
3775		id = REISERFS_I(inode)->i_trans_id;
3776		/* jl will be updated in __commit_trans_jl */
3777	}
3778
3779	return __commit_trans_jl(inode, id, jl);
3780}
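
/*
 * Example usage (a minimal sketch, not taken from this file): an fsync-style
 * path would call reiserfs_commit_for_inode roughly as follows.  The name
 * example_fsync is hypothetical.
 */
#if 0
static int example_fsync(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	/* negative means the journal saw an error; 0 means nothing needed
	 ** committing; 1 means a commit and barrier were actually done */
	return ret < 0 ? ret : 0;
}
#endif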
3781
3782void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
3783				      struct buffer_head *bh)
3784{
3785	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3786	PROC_INFO_INC(p_s_sb, journal.restore_prepared);
3787	if (!bh) {
3788		return;
3789	}
3790	if (test_clear_buffer_journal_restore_dirty(bh) &&
3791	    buffer_journal_dirty(bh)) {
3792		struct reiserfs_journal_cnode *cn;
3793		cn = get_journal_hash_dev(p_s_sb,
3794					  journal->j_list_hash_table,
3795					  bh->b_blocknr);
3796		if (cn && can_dirty(cn)) {
3797			set_buffer_journal_test(bh);
3798			mark_buffer_dirty(bh);
3799		}
3800	}
3801	clear_buffer_journal_prepared(bh);
3802}
3803
3804extern struct tree_balance *cur_tb;
3805/*
3806** before we can change a metadata block, we have to make sure it won't
3807** be written to disk while we are altering it.  So, we must:
3808** clean it,
3809** wait on it.
3810*/
3812int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
3813				 struct buffer_head *bh, int wait)
3814{
3815	PROC_INFO_INC(p_s_sb, journal.prepare);
3816
3817	if (test_set_buffer_locked(bh)) {
3818		if (!wait)
3819			return 0;
3820		lock_buffer(bh);
3821	}
3822	set_buffer_journal_prepared(bh);
3823	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3824		clear_buffer_journal_test(bh);
3825		set_buffer_journal_restore_dirty(bh);
3826	}
3827	unlock_buffer(bh);
3828	return 1;
3829}
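
/*
 * Example usage (a minimal sketch, not taken from this file): prepare pairs
 * with either journal_mark_dirty or, on the back-out path,
 * reiserfs_restore_prepared_buffer above.  The name example_maybe_change is
 * hypothetical.
 */
#if 0
static void example_maybe_change(struct reiserfs_transaction_handle *th,
				 struct super_block *sb,
				 struct buffer_head *bh, int do_it)
{
	reiserfs_prepare_for_journal(sb, bh, 1);	/* clean it, wait on it */
	if (do_it) {
		/* ... modify bh->b_data ... */
		journal_mark_dirty(th, sb, bh);
	} else {
		/* back out; re-dirty the buffer if prepare had cleaned it */
		reiserfs_restore_prepared_buffer(sb, bh);
	}
}
#endif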
3830
3831static void flush_old_journal_lists(struct super_block *s)
3832{
3833	struct reiserfs_journal *journal = SB_JOURNAL(s);
3834	struct reiserfs_journal_list *jl;
3835	struct list_head *entry;
3836	time_t now = get_seconds();
3837
3838	while (!list_empty(&journal->j_journal_list)) {
3839		entry = journal->j_journal_list.next;
3840		jl = JOURNAL_LIST_ENTRY(entry);
3841		/* this check should always be run, to send old lists to disk */
3842		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
3843			flush_used_journal_lists(s, jl);
3844		} else {
3845			break;
3846		}
3847	}
3848}
3849
3850/*
3851** long and ugly.  If flush, will not return until all commit
3852** blocks and all real buffers in the trans are on disk.
3853** If no_async, won't return until all commit blocks are on disk.
3854**
3855** keep reading, there are comments as you go along
3856**
3857** If the journal is aborted, we just clean up. Things like flushing
3858** journal lists, etc just won't happen.
3859*/
3860static int do_journal_end(struct reiserfs_transaction_handle *th,
3861			  struct super_block *p_s_sb, unsigned long nblocks,
3862			  int flags)
3863{
3864	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3865	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3866	struct reiserfs_journal_cnode *last_cn = NULL;
3867	struct reiserfs_journal_desc *desc;
3868	struct reiserfs_journal_commit *commit;
3869	struct buffer_head *c_bh;	/* commit bh */
3870	struct buffer_head *d_bh;	/* desc bh */
3871	int cur_write_start = 0;	/* start index of current log write */
3872	int old_start;
3873	int i;
3874	int flush = flags & FLUSH_ALL;
3875	int wait_on_commit = flags & WAIT;
3876	struct reiserfs_journal_list *jl, *temp_jl;
3877	struct list_head *entry, *safe;
3878	unsigned long jindex;
3879	unsigned long commit_trans_id;
3880	int trans_half;
3881
3882	BUG_ON(th->t_refcount > 1);
3883	BUG_ON(!th->t_trans_id);
3884
3885	put_fs_excl();
3886	current->journal_info = th->t_handle_save;
3887	reiserfs_check_lock_depth(p_s_sb, "journal end");
3888	if (journal->j_len == 0) {
3889		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3890					     1);
3891		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3892	}
3893
3894	lock_journal(p_s_sb);
3895	if (journal->j_next_full_flush) {
3896		flags |= FLUSH_ALL;
3897		flush = 1;
3898	}
3899	if (journal->j_next_async_flush) {
3900		flags |= COMMIT_NOW | WAIT;
3901		wait_on_commit = 1;
3902	}
3903
3904	/* check_journal_end locks the journal, and unlocks it if it does not return 1.
3905	 ** It tells us whether we should continue with the journal_end or just return.
3906	 */
3907	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
3908		p_s_sb->s_dirt = 1;
3909		wake_queued_writers(p_s_sb);
3910		reiserfs_async_progress_wait(p_s_sb);
3911		goto out;
3912	}
3913
3914	/* check_journal_end might set these, check again */
3915	if (journal->j_next_full_flush) {
3916		flush = 1;
3917	}
3918
3919	/*
3920	 ** j_must_wait means we have to flush the log blocks and the real blocks for
3921	 ** this transaction
3922	 */
3923	if (journal->j_must_wait > 0) {
3924		flush = 1;
3925	}
3926#ifdef REISERFS_PREALLOCATE
3927	/* quota ops might need to nest, setup the journal_info pointer for them
3928	 * and raise the refcount so that it is > 0. */
3929	current->journal_info = th;
3930	th->t_refcount++;
3931	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
3932						 * the transaction */
3933	th->t_refcount--;
3934	current->journal_info = th->t_handle_save;
3935#endif
3936
3937	/* setup description block */
3938	d_bh =
3939	    journal_getblk(p_s_sb,
3940			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3941			   journal->j_start);
3942	set_buffer_uptodate(d_bh);
3943	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
3944	memset(d_bh->b_data, 0, d_bh->b_size);
3945	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
3946	set_desc_trans_id(desc, journal->j_trans_id);
3947
3948	/* setup commit block.  Don't write this one (and keep it clean) until everything else has been written */
3949	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3950			      ((journal->j_start + journal->j_len +
3951				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
3952	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
3953	memset(c_bh->b_data, 0, c_bh->b_size);
3954	set_commit_trans_id(commit, journal->j_trans_id);
3955	set_buffer_uptodate(c_bh);
3956
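	/*
	 * Sketch of the on-disk layout being assembled here, assuming the
	 * transaction does not wrap the log (offsets are relative to
	 * SB_ONDISK_JOURNAL_1st_BLOCK):
	 *
	 *   j_start                      desc block (magic, trans id, first
	 *                                part of the real block array)
	 *   j_start+1 .. j_start+j_len   copies of the logged real buffers
	 *   j_start+j_len+1              commit block (trans id, rest of the
	 *                                array), written last by
	 *                                flush_commit_list
	 */
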
3957	/* init this journal list */
3958	jl = journal->j_current_jl;
3959
3960	/* we lock the commit before doing anything because
3961	 * we want to make sure nobody tries to run flush_commit_list until
3962	 * the new transaction is fully setup, and we've already flushed the
3963	 * ordered bh list
3964	 */
3965	down(&jl->j_commit_lock);
3966
3967	/* save the transaction id in case we need to commit it later */
3968	commit_trans_id = jl->j_trans_id;
3969
3970	atomic_set(&jl->j_older_commits_done, 0);
3971	jl->j_trans_id = journal->j_trans_id;
3972	jl->j_timestamp = journal->j_trans_start_time;
3973	jl->j_commit_bh = c_bh;
3974	jl->j_start = journal->j_start;
3975	jl->j_len = journal->j_len;
3976	atomic_set(&jl->j_nonzerolen, journal->j_len);
3977	atomic_set(&jl->j_commit_left, journal->j_len + 2);
3978	jl->j_realblock = NULL;
3979
3980	/* The ENTIRE FOR LOOP MUST NOT schedule.
3981	 ** For each real block, add it to the journal list hash and
3982	 ** copy its block number into the real block index array in the commit or desc block.
3983	 */
3984	trans_half = journal_trans_half(p_s_sb->s_blocksize);
3985	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
3986		if (buffer_journaled(cn->bh)) {
3987			jl_cn = get_cnode(p_s_sb);
3988			if (!jl_cn) {
3989				reiserfs_panic(p_s_sb,
3990				       "journal-1676: get_cnode returned NULL\n");
3991			}
3992			if (i == 0) {
3993				jl->j_realblock = jl_cn;
3994			}
3995			jl_cn->prev = last_cn;
3996			jl_cn->next = NULL;
3997			if (last_cn) {
3998				last_cn->next = jl_cn;
3999			}
4000			last_cn = jl_cn;
4001			/* make sure the block we are trying to log is not a block
4002			   of journal or reserved area */
4003
4004			if (is_block_in_log_or_reserved_area
4005			    (p_s_sb, cn->bh->b_blocknr)) {
4006				reiserfs_panic(p_s_sb,
4007					       "journal-2332: Trying to log block %lu, which is a log block\n",
4008					       cn->bh->b_blocknr);
4009			}
4010			jl_cn->blocknr = cn->bh->b_blocknr;
4011			jl_cn->state = 0;
4012			jl_cn->sb = p_s_sb;
4013			jl_cn->bh = cn->bh;
4014			jl_cn->jlist = jl;
4015			insert_journal_hash(journal->j_list_hash_table, jl_cn);
4016			if (i < trans_half) {
4017				desc->j_realblock[i] =
4018				    cpu_to_le32(cn->bh->b_blocknr);
4019			} else {
4020				commit->j_realblock[i - trans_half] =
4021				    cpu_to_le32(cn->bh->b_blocknr);
4022			}
4023		} else {
4024			i--;
4025		}
4026	}
4027	set_desc_trans_len(desc, journal->j_len);
4028	set_desc_mount_id(desc, journal->j_mount_id);
4029	set_desc_trans_id(desc, journal->j_trans_id);
4030	set_commit_trans_len(commit, journal->j_len);
4031
4032	/* special check in case all buffers in the journal were marked for not logging */
4033	if (journal->j_len == 0) {
4034		BUG();
4035	}
4036
4037	/* we're about to dirty all the log blocks, mark the description block
4038	 * dirty now too.  Don't mark the commit block dirty until all the
4039	 * others are on disk
4040	 */
4041	mark_buffer_dirty(d_bh);
4042
4043	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4044	cur_write_start = journal->j_start;
4045	cn = journal->j_first;
4046	jindex = 1;		/* start at one so we don't get the desc again */
4047	while (cn) {
4048		clear_buffer_journal_new(cn->bh);
4049		/* copy all the real blocks into log area.  dirty log blocks */
4050		if (buffer_journaled(cn->bh)) {
4051			struct buffer_head *tmp_bh;
4052			char *addr;
4053			struct page *page;
4054			tmp_bh =
4055			    journal_getblk(p_s_sb,
4056					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4057					   ((cur_write_start +
4058					     jindex) %
4059					    SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4060			set_buffer_uptodate(tmp_bh);
4061			page = cn->bh->b_page;
4062			addr = kmap(page);
4063			memcpy(tmp_bh->b_data,
4064			       addr + offset_in_page(cn->bh->b_data),
4065			       cn->bh->b_size);
4066			kunmap(page);
4067			mark_buffer_dirty(tmp_bh);
4068			jindex++;
4069			set_buffer_journal_dirty(cn->bh);
4070			clear_buffer_journaled(cn->bh);
4071		} else {
4072			/* JDirty cleared sometime during transaction.  don't log this one */
4073			reiserfs_warning(p_s_sb,
4074					 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
4075			brelse(cn->bh);
4076		}
4077		next = cn->next;
4078		free_cnode(p_s_sb, cn);
4079		cn = next;
4080		cond_resched();
4081	}
4082
4083	/* we are done with both the c_bh and d_bh, but
4084	 ** c_bh must be written after all other commit blocks,
4085	 ** so we dirty and release c_bh in flush_commit_list, with commit_left <= 1.
4086	 */
4087
4088	journal->j_current_jl = alloc_journal_list(p_s_sb);
4089
4090	/* now it is safe to insert this transaction on the main list */
4091	list_add_tail(&jl->j_list, &journal->j_journal_list);
4092	list_add_tail(&jl->j_working_list, &journal->j_working_list);
4093	journal->j_num_work_lists++;
4094
4095	/* reset journal values for the next transaction */
4096	old_start = journal->j_start;
4097	journal->j_start =
4098	    (journal->j_start + journal->j_len +
4099	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
4100	atomic_set(&(journal->j_wcount), 0);
4101	journal->j_bcount = 0;
4102	journal->j_last = NULL;
4103	journal->j_first = NULL;
4104	journal->j_len = 0;
4105	journal->j_trans_start_time = 0;
4106	journal->j_trans_id++;
4107	journal->j_current_jl->j_trans_id = journal->j_trans_id;
4108	journal->j_must_wait = 0;
4109	journal->j_len_alloc = 0;
4110	journal->j_next_full_flush = 0;
4111	journal->j_next_async_flush = 0;
4112	init_journal_hash(p_s_sb);
4113
4114	/* make sure reiserfs_add_jh sees the new current_jl before we
4115	 * write out the tails */
4116	smp_mb();
4117
4118	/* tail conversion targets have to hit the disk before we end the
4119	 * transaction.  Otherwise a later transaction might repack the tail
4120	 * before this transaction commits, leaving the data block unflushed and
4121	 * clean; if we crash before the later transaction commits, the data block
4122	 * is lost.
4123	 */
4124	if (!list_empty(&jl->j_tail_bh_list)) {
4125		unlock_kernel();
4126		write_ordered_buffers(&journal->j_dirty_buffers_lock,
4127				      journal, jl, &jl->j_tail_bh_list);
4128		lock_kernel();
4129	}
4130	if (!list_empty(&jl->j_tail_bh_list))
4131		BUG();
4132	up(&jl->j_commit_lock);
4133
4134	/* honor the flush wishes from the caller.  Simple commits can
4135	 ** be done outside the journal lock; they are done below.
4136	 **
4137	 ** if we don't flush the commit list right now, we put it on
4138	 ** the work queue so that people waiting on the async progress work
4139	 ** queue don't wait for this proc to flush journal lists and such.
4140	 */
4141	if (flush) {
4142		flush_commit_list(p_s_sb, jl, 1);
4143		flush_journal_list(p_s_sb, jl, 1);
4144	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
4145		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4146
4147	/* if the next transaction has any chance of wrapping, flush
4148	 ** transactions that might get overwritten.  If any journal lists are very
4149	 ** old, flush them as well.
4150	 */
4151      first_jl:
4152	list_for_each_safe(entry, safe, &journal->j_journal_list) {
4153		temp_jl = JOURNAL_LIST_ENTRY(entry);
4154		if (journal->j_start <= temp_jl->j_start) {
4155			if ((journal->j_start + journal->j_trans_max + 1) >=
4156			    temp_jl->j_start) {
4157				flush_used_journal_lists(p_s_sb, temp_jl);
4158				goto first_jl;
4159			} else if ((journal->j_start +
4160				    journal->j_trans_max + 1) <
4161				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4162				/* if we don't cross into the next transaction and we don't
4163				 * wrap, there is no way we can overlap any later transactions;
4164				 * break now
4165				 */
4166				break;
4167			}
4168		} else if ((journal->j_start +
4169			    journal->j_trans_max + 1) >
4170			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4171			if (((journal->j_start + journal->j_trans_max + 1) %
4172			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
4173			    temp_jl->j_start) {
4174				flush_used_journal_lists(p_s_sb, temp_jl);
4175				goto first_jl;
4176			} else {
4177				/* we don't overlap anything from our start to the end of the
4178				 * log, and our wrapped portion doesn't overlap anything at
4179				 * the start of the log.  We can break
4180				 */
4181				break;
4182			}
4183		}
4184	}
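	/*
	 * Worked example (numbers chosen for illustration): with an on-disk
	 * journal of 8192 blocks, j_start == 8000 and j_trans_max == 1024,
	 * j_start + j_trans_max + 1 == 9025 wraps to 9025 % 8192 == 833, so
	 * the loop above flushes any list whose j_start falls in 8000..8191
	 * (first branch) or 0..833 (wrapped branch) before the next
	 * transaction can overwrite it.
	 */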
4185	flush_old_journal_lists(p_s_sb);
4186
4187	journal->j_current_jl->j_list_bitmap =
4188	    get_list_bitmap(p_s_sb, journal->j_current_jl);
4189
4190	if (!(journal->j_current_jl->j_list_bitmap)) {
4191		reiserfs_panic(p_s_sb,
4192			       "journal-1996: do_journal_end, could not get a list bitmap\n");
4193	}
4194
4195	atomic_set(&(journal->j_jlock), 0);
4196	unlock_journal(p_s_sb);
4197	/* wake up anybody waiting to join. */
4198	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4199	wake_up(&(journal->j_join_wait));
4200
4201	if (!flush && wait_on_commit &&
4202	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
4203		flush_commit_list(p_s_sb, jl, 1);
4204	}
4205      out:
4206	reiserfs_check_lock_depth(p_s_sb, "journal end2");
4207
4208	memset(th, 0, sizeof(*th));
4209	/* Re-set th->t_super, so we can properly keep track of how many
4210	 * persistent transactions there are. We need to do this so that if this
4211	 * call is part of a failed restart_transaction, we can free it later */
4212	th->t_super = p_s_sb;
4213
4214	return journal->j_errno;
4215}
4216
4217static void __reiserfs_journal_abort_hard(struct super_block *sb)
4218{
4219	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4220	if (test_bit(J_ABORTED, &journal->j_state))
4221		return;
4222
4223	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
4224	       reiserfs_bdevname(sb));
4225
4226	sb->s_flags |= MS_RDONLY;
4227	set_bit(J_ABORTED, &journal->j_state);
4228
4229#ifdef CONFIG_REISERFS_CHECK
4230	dump_stack();
4231#endif
4232}
4233
4234static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
4235{
4236	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4237	if (test_bit(J_ABORTED, &journal->j_state))
4238		return;
4239
4240	if (!journal->j_errno)
4241		journal->j_errno = errno;
4242
4243	__reiserfs_journal_abort_hard(sb);
4244}
4245
4246void reiserfs_journal_abort(struct super_block *sb, int errno)
4247{
4248	return __reiserfs_journal_abort_soft(sb, errno);
4249}
4250