journal.c revision 041e0e3b1970c508dc9a95b7dd9dc86271a7d7ac
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit.... The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush
**                      from within kupdate, it will ignore the immediate flag
**
** (a short usage sketch of these entry points follows the macros below)
*/

#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
                                   structs at 4k */
#define BUFNR 64                /* read ahead */
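/*
** a hedged usage sketch of the entry points described above (error paths
** trimmed; the block count of 2 is just an illustrative estimate, and bh is
** assumed to be a metadata buffer the caller already holds):
**
**    struct reiserfs_transaction_handle th;
**    int err;
**
**    err = journal_begin(&th, sb, 2);         // reserve room for ~2 blocks
**    if (err)
**        return err;
**    reiserfs_prepare_for_journal(sb, bh, 1); // take bh away from writeback
**    ... modify bh->b_data ...
**    journal_mark_dirty(&th, sb, bh);         // pin bh in this transaction
**    err = journal_end(&th, sb, 2);           // batch, commit, or flush
*/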
/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4  /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1           /* flush commit and real blocks */
#define COMMIT_NOW  2           /* end and commit this transaction */
#define WAIT        4           /* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags);
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
    JBEGIN_REG = 0,             /* regular journal begin */
    JBEGIN_JOIN = 1,            /* join the running transaction if at all possible */
    JBEGIN_ABORT = 2,           /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    memset(journal->j_hash_table, 0,
           JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
    if (bh) {
        clear_buffer_dirty(bh);
        clear_buffer_journal_test(bh);
    }
    return 0;
}

static void disable_barrier(struct super_block *s)
{
    REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
    printk("reiserfs: disabling flush barriers on %s\n",
           reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
                                                         *p_s_sb)
{
    struct reiserfs_bitmap_node *bn;
    static int id;

    bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS,
                          p_s_sb);
    if (!bn) {
        return NULL;
    }
    bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb);
    if (!bn->data) {
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
        return NULL;
    }
    bn->id = id++;
    memset(bn->data, 0, p_s_sb->s_blocksize);
    INIT_LIST_HEAD(&bn->list);
    return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_bitmap_node *bn = NULL;
    struct list_head *entry = journal->j_bitmap_nodes.next;

    journal->j_used_bitmap_nodes++;
  repeat:

    if (entry != &journal->j_bitmap_nodes) {
        bn = list_entry(entry, struct reiserfs_bitmap_node, list);
        list_del(entry);
        memset(bn->data, 0, p_s_sb->s_blocksize);
        journal->j_free_bitmap_nodes--;
        return bn;
    }
    bn = allocate_bitmap_node(p_s_sb);
    if (!bn) {
        yield();
        goto repeat;
    }
    return bn;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    journal->j_used_bitmap_nodes--;
    if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
        reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
    } else {
        list_add(&bn->list, &journal->j_bitmap_nodes);
        journal->j_free_bitmap_nodes++;
    }
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
    int i;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_bitmap_node *bn = NULL;
    for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
        bn = allocate_bitmap_node(p_s_sb);
        if (bn) {
            list_add(&bn->list, &journal->j_bitmap_nodes);
            journal->j_free_bitmap_nodes++;
        } else {
            break;      // this is ok, we'll try again when more are needed
        }
    }
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb)
{
    int bmap_nr = block / (p_s_sb->s_blocksize << 3);
    int bit_nr = block % (p_s_sb->s_blocksize << 3);

    if (!jb->bitmaps[bmap_nr]) {
        jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
    }
    set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
    return 0;
}

static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb)
{
    int i;
    if (jb->bitmaps == NULL)
        return;

    for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
        if (jb->bitmaps[i]) {
            free_bitmap_node(p_s_sb, jb->bitmaps[i]);
            jb->bitmaps[i] = NULL;
        }
    }
}
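/*
** worked example for the index math in set_bit_in_list_bitmap above: with a
** 4k blocksize each bitmap node covers 4096 << 3 == 32768 block bits, so
** block 40000 lands in bmap_nr = 40000 / 32768 = 1 at
** bit_nr = 40000 % 32768 = 7232.
*/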
/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array)
{
    int i;
    struct reiserfs_list_bitmap *jb;
    for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
        jb = jb_array + i;
        jb->journal_list = NULL;
        cleanup_bitmap_list(p_s_sb, jb);
        vfree(jb->bitmaps);
        jb->bitmaps = NULL;
    }
    return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct list_head *next = journal->j_bitmap_nodes.next;
    struct reiserfs_bitmap_node *bn;

    while (next != &journal->j_bitmap_nodes) {
        bn = list_entry(next, struct reiserfs_bitmap_node, list);
        list_del(next);
        reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
        next = journal->j_bitmap_nodes.next;
        journal->j_free_bitmap_nodes--;
    }

    return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr)
{
    int i;
    int failed = 0;
    struct reiserfs_list_bitmap *jb;
    int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

    for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
        jb = jb_array + i;
        jb->journal_list = NULL;
        jb->bitmaps = vmalloc(mem);
        if (!jb->bitmaps) {
            reiserfs_warning(p_s_sb,
                             "clm-2000, unable to allocate bitmaps for journal lists");
            failed = 1;
            break;
        }
        memset(jb->bitmaps, 0, mem);
    }
    if (failed) {
        free_list_bitmaps(p_s_sb, jb_array);
        return -1;
    }
    return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
                                                    struct reiserfs_journal_list
                                                    *jl)
{
    int i, j;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_list_bitmap *jb = NULL;

    for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
        i = journal->j_list_bitmap_index;
        journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
        jb = journal->j_list_bitmap + i;
        if (journal->j_list_bitmap[i].journal_list) {
            flush_commit_list(p_s_sb,
                              journal->j_list_bitmap[i].journal_list, 1);
            if (!journal->j_list_bitmap[i].journal_list) {
                break;
            }
        } else {
            break;
        }
    }
    if (jb->journal_list) {     /* double check to make sure it flushed correctly */
        return NULL;
    }
    jb->journal_list = jl;
    return jb;
}
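/*
** lifecycle note: a list bitmap stays bound to its journal list through
** jb->journal_list until the commit finishes and
** cleanup_freed_for_journal_list (below) clears the binding, which is why
** get_list_bitmap may have to flush a commit list before it can reuse one.
*/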
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
    struct reiserfs_journal_cnode *head;
    int i;
    if (num_cnodes <= 0) {
        return NULL;
    }
    head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
    if (!head) {
        return NULL;
    }
    memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
    head[0].prev = NULL;
    head[0].next = head + 1;
    for (i = 1; i < num_cnodes; i++) {
        head[i].prev = head + (i - 1);
        head[i].next = head + (i + 1);  /* for the last one, this gets overwritten after the loop */
    }
    head[num_cnodes - 1].next = NULL;
    return head;
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    reiserfs_check_lock_depth(p_s_sb, "get_cnode");

    if (journal->j_cnode_free <= 0) {
        return NULL;
    }
    journal->j_cnode_used++;
    journal->j_cnode_free--;
    cn = journal->j_cnode_free_list;
    if (!cn) {
        return cn;
    }
    if (cn->next) {
        cn->next->prev = NULL;
    }
    journal->j_cnode_free_list = cn->next;
    memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
    return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
                       struct reiserfs_journal_cnode *cn)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    reiserfs_check_lock_depth(p_s_sb, "free_cnode");

    journal->j_cnode_used--;
    journal->j_cnode_free++;
    /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
    cn->next = journal->j_cnode_free_list;
    if (journal->j_cnode_free_list) {
        journal->j_cnode_free_list->prev = cn;
    }
    cn->prev = NULL;    /* not needed with the memset, but I might kill the memset, and forget to do this */
    journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
    clear_buffer_journal_prepared(bh);
    clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
    if (current->lock_depth < 0) {
        reiserfs_panic(sb, "%s called without kernel lock held", caller);
    }
#else
    ;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(
    struct super_block *sb, struct reiserfs_journal_cnode **table, long bl)
{
    struct reiserfs_journal_cnode *cn;
    cn = journal_hash(table, sb, bl);
    while (cn) {
        if (cn->blocknr == bl && cn->sb == sb)
            return cn;
        cn = cn->hnext;
    }
    return (struct reiserfs_journal_cnode *)0;
}
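/*
** chain-order note: insert_journal_hash (below) always pushes new cnodes at
** the head of a hash chain, so entries nearer the head belong to newer
** transactions.  That invariant is what lets find_newer_jl_for_cn walk the
** hprev pointers (toward the head) to find a more recent transaction that
** logged the same block.
*/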
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_list_bitmap *jb;
    int i;
    unsigned long bl;

    *next_zero_bit = 0;         /* always start this at zero. */

    PROC_INFO_INC(p_s_sb, journal.in_journal);
    /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
    ** if we crash before the transaction that freed it commits, this transaction won't
    ** have committed either, and the block will never be written
    */
    if (search_all) {
        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
            PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
            jb = journal->j_list_bitmap + i;
            if (jb->journal_list && jb->bitmaps[bmap_nr] &&
                test_bit(bit_nr,
                         (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
                *next_zero_bit =
                    find_next_zero_bit((unsigned long *)
                                       (jb->bitmaps[bmap_nr]->data),
                                       p_s_sb->s_blocksize << 3,
                                       bit_nr + 1);
                return 1;
            }
        }
    }

    bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
    /* is it in any old transactions? */
    if (search_all
        && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
        return 1;
    }
    /* is it in the current transaction.  This should never happen */
    if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
        BUG();
        return 1;
    }

    PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
    /* safe for reuse */
    return 0;
}

/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
                                       struct reiserfs_journal_cnode *cn)
{
    struct reiserfs_journal_cnode *cn_orig;

    cn_orig = journal_hash(table, cn->sb, cn->blocknr);
    cn->hnext = cn_orig;
    cn->hprev = NULL;
    if (cn_orig) {
        cn_orig->hprev = cn;
    }
    journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
    PROC_INFO_INC(p_s_sb, journal.lock_journal);
    down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
    up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
    jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
    if (jl->j_refcount < 1) {
        reiserfs_panic(s, "trans id %lu, refcount at %d",
                       jl->j_trans_id, jl->j_refcount);
    }
    if (--jl->j_refcount == 0)
        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
                                           struct reiserfs_journal_list *jl)
{

    struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
    if (jb) {
        cleanup_bitmap_list(p_s_sb, jb);
    }
    jl->j_list_bitmap->journal_list = NULL;
    jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    struct list_head *entry = &journal->j_journal_list;
    struct reiserfs_journal_list *jl;

    if (!list_empty(entry)) {
        jl = JOURNAL_LIST_ENTRY(entry->next);
        if (jl->j_trans_id <= trans_id) {
            return 1;
        }
    }
    return 0;
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
    char b[BDEVNAME_SIZE];

    if (buffer_journaled(bh)) {
        reiserfs_warning(NULL,
                         "clm-2084: pinned buffer %lu:%s sent to disk",
                         bh->b_blocknr, bdevname(bh->b_bdev, b));
    }
    if (uptodate)
        set_buffer_uptodate(bh);
    else
        clear_buffer_uptodate(bh);
    unlock_buffer(bh);
    put_bh(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
    if (uptodate)
        set_buffer_uptodate(bh);
    else
        clear_buffer_uptodate(bh);
    unlock_buffer(bh);
    put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
    get_bh(bh);
    bh->b_end_io = reiserfs_end_buffer_io_sync;
    clear_buffer_journal_new(bh);
    clear_buffer_dirty(bh);
    if (!test_clear_buffer_journal_test(bh))
        BUG();
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh);
}
static void submit_ordered_buffer(struct buffer_head *bh)
{
    get_bh(bh);
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh);
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
    get_bh(bh);
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh);
    if (!buffer_uptodate(bh))
        BUG();
    return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh)
{
    if (buffer_eopnotsupp(bh)) {
        clear_buffer_eopnotsupp(bh);
        disable_barrier(s);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    }
}

#define CHUNK_SIZE 32
struct buffer_chunk {
    struct buffer_head *bh[CHUNK_SIZE];
    int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
    int i;
    get_fs_excl();
    for (i = 0; i < chunk->nr; i++) {
        submit_logged_buffer(chunk->bh[i]);
    }
    chunk->nr = 0;
    put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
    int i;
    get_fs_excl();
    for (i = 0; i < chunk->nr; i++) {
        submit_ordered_buffer(chunk->bh[i]);
    }
    chunk->nr = 0;
    put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock, void (fn) (struct buffer_chunk *))
{
    int ret = 0;
    if (chunk->nr >= CHUNK_SIZE)
        BUG();
    chunk->bh[chunk->nr++] = bh;
    if (chunk->nr >= CHUNK_SIZE) {
        ret = 1;
        if (lock)
            spin_unlock(lock);
        fn(chunk);
        if (lock)
            spin_lock(lock);
    }
    return ret;
}

static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
    struct reiserfs_jh *jh;
    while (1) {
        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
            atomic_inc(&nr_reiserfs_jh);
            return jh;
        }
        yield();
    }
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
    struct reiserfs_jh *jh;

    jh = bh->b_private;
    if (jh) {
        bh->b_private = NULL;
        jh->bh = NULL;
        list_del_init(&jh->list);
        kfree(jh);
        if (atomic_read(&nr_reiserfs_jh) <= 0)
            BUG();
        atomic_dec(&nr_reiserfs_jh);
        put_bh(bh);
    }
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
    struct reiserfs_jh *jh;

    if (bh->b_private) {
        spin_lock(&j->j_dirty_buffers_lock);
        if (!bh->b_private) {
            spin_unlock(&j->j_dirty_buffers_lock);
            goto no_jh;
        }
        jh = bh->b_private;
        list_del_init(&jh->list);
    } else {
      no_jh:
        get_bh(bh);
        jh = alloc_jh();
        spin_lock(&j->j_dirty_buffers_lock);
        /* buffer must be locked for __add_jh, should be able to have
         * two adds at the same time
         */
        if (bh->b_private)
            BUG();
        jh->bh = bh;
        bh->b_private = jh;
    }
    jh->jl = j->j_current_jl;
    if (tail)
        list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
    else {
        list_add_tail(&jh->list, &jh->jl->j_bh_list);
    }
    spin_unlock(&j->j_dirty_buffers_lock);
    return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
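/*
** a hedged sketch of how the chunk helpers above are meant to be driven
** (this mirrors write_ordered_buffers below; lock is whatever spinlock
** protects the list being walked):
**
**    struct buffer_chunk chunk;
**    chunk.nr = 0;
**    spin_lock(lock);
**    for each dirty bh on the list:
**        add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
**        // add_to_chunk drops and retakes the lock around the submit
**        // once CHUNK_SIZE buffers have accumulated
**    if (chunk.nr) {
**        spin_unlock(lock);
**        write_ordered_chunk(&chunk);    // flush the final partial chunk
**        spin_lock(lock);
**    }
**    spin_unlock(lock);
*/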
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
    struct buffer_head *bh;
    struct reiserfs_jh *jh;
    int ret = j->j_errno;
    struct buffer_chunk chunk;
    struct list_head tmp;
    INIT_LIST_HEAD(&tmp);

    chunk.nr = 0;
    spin_lock(lock);
    while (!list_empty(list)) {
        jh = JH_ENTRY(list->next);
        bh = jh->bh;
        get_bh(bh);
        if (test_set_buffer_locked(bh)) {
            if (!buffer_dirty(bh)) {
                list_del_init(&jh->list);
                list_add(&jh->list, &tmp);
                goto loop_next;
            }
            spin_unlock(lock);
            if (chunk.nr)
                write_ordered_chunk(&chunk);
            wait_on_buffer(bh);
            cond_resched();
            spin_lock(lock);
            goto loop_next;
        }
        if (buffer_dirty(bh)) {
            list_del_init(&jh->list);
            list_add(&jh->list, &tmp);
            add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
        } else {
            reiserfs_free_jh(bh);
            unlock_buffer(bh);
        }
      loop_next:
        put_bh(bh);
        cond_resched_lock(lock);
    }
    if (chunk.nr) {
        spin_unlock(lock);
        write_ordered_chunk(&chunk);
        spin_lock(lock);
    }
    while (!list_empty(&tmp)) {
        jh = JH_ENTRY(tmp.prev);
        bh = jh->bh;
        get_bh(bh);
        reiserfs_free_jh(bh);

        if (buffer_locked(bh)) {
            spin_unlock(lock);
            wait_on_buffer(bh);
            spin_lock(lock);
        }
        if (!buffer_uptodate(bh)) {
            ret = -EIO;
        }
        put_bh(bh);
        cond_resched_lock(lock);
    }
    spin_unlock(lock);
    return ret;
}

static int flush_older_commits(struct super_block *s,
                               struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal_list *first_jl;
    struct list_head *entry;
    unsigned long trans_id = jl->j_trans_id;
    unsigned long other_trans_id;
    unsigned long first_trans_id;

  find_first:
    /*
     * first we walk backwards to find the oldest uncommitted transaction
     */
    first_jl = jl;
    entry = jl->j_list.prev;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (entry == &journal->j_journal_list ||
            atomic_read(&other_jl->j_older_commits_done))
            break;

        first_jl = other_jl;
        entry = other_jl->j_list.prev;
    }

    /* if we didn't find any older uncommitted transactions, return now */
    if (first_jl == jl) {
        return 0;
    }

    first_trans_id = first_jl->j_trans_id;

    entry = &first_jl->j_list;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        other_trans_id = other_jl->j_trans_id;

        if (other_trans_id < trans_id) {
            if (atomic_read(&other_jl->j_commit_left) != 0) {
                flush_commit_list(s, other_jl, 0);

                /* list we were called with is gone, return */
                if (!journal_list_still_alive(s, trans_id))
                    return 1;

                /* the one we just flushed is gone, this means all
                 * older lists are also gone, so first_jl is no longer
                 * valid either.  Go back to the beginning.
                 */
                if (!journal_list_still_alive(s, other_trans_id)) {
                    goto find_first;
                }
            }
            entry = entry->next;
            if (entry == &journal->j_journal_list)
                return 0;
        } else {
            return 0;
        }
    }
    return 0;
}
int reiserfs_async_progress_wait(struct super_block *s)
{
    DEFINE_WAIT(wait);
    struct reiserfs_journal *j = SB_JOURNAL(s);
    if (atomic_read(&j->j_async_throttle))
        blk_congestion_wait(WRITE, HZ / 10);
    return 0;
}
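/*
** on-disk layout of a single transaction in the circular log, as assumed by
** the offset math in flush_commit_list and journal_read_transaction (all
** block numbers are relative to SB_ONDISK_JOURNAL_1st_BLOCK(s) and taken
** modulo SB_ONDISK_JOURNAL_SIZE(s)):
**
**    j_start                          description block
**    j_start + 1 .. j_start + j_len   the logged blocks
**    j_start + j_len + 1              commit block
*/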
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall)
{
    int i;
    int bn;
    struct buffer_head *tbh = NULL;
    unsigned long trans_id = jl->j_trans_id;
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    int barrier = 0;
    int retval = 0;

    reiserfs_check_lock_depth(s, "flush_commit_list");

    if (atomic_read(&jl->j_older_commits_done)) {
        return 0;
    }

    get_fs_excl();

    /* before we can put our commit blocks on disk, we have to make sure everyone older than
    ** us is on disk too
    */
    BUG_ON(jl->j_len <= 0);
    BUG_ON(trans_id == journal->j_trans_id);

    get_journal_list(jl);
    if (flushall) {
        if (flush_older_commits(s, jl) == 1) {
            /* list disappeared during flush_older_commits.  return */
            goto put_jl;
        }
    }

    /* make sure nobody is trying to flush this one at the same time */
    down(&jl->j_commit_lock);
    if (!journal_list_still_alive(s, trans_id)) {
        up(&jl->j_commit_lock);
        goto put_jl;
    }
    BUG_ON(jl->j_trans_id == 0);

    /* this commit is done, exit */
    if (atomic_read(&(jl->j_commit_left)) <= 0) {
        if (flushall) {
            atomic_set(&(jl->j_older_commits_done), 1);
        }
        up(&jl->j_commit_lock);
        goto put_jl;
    }

    if (!list_empty(&jl->j_bh_list)) {
        unlock_kernel();
        write_ordered_buffers(&journal->j_dirty_buffers_lock,
                              journal, jl, &jl->j_bh_list);
        lock_kernel();
    }
    BUG_ON(!list_empty(&jl->j_bh_list));
    /*
     * for the description block and all the log blocks, submit any buffers
     * that haven't already reached the disk
     */
    atomic_inc(&journal->j_async_throttle);
    for (i = 0; i < (jl->j_len + 1); i++) {
        bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
            SB_ONDISK_JOURNAL_SIZE(s);
        tbh = journal_find_get_block(s, bn);
        if (buffer_dirty(tbh))          /* redundant, ll_rw_block() checks */
            ll_rw_block(SWRITE, 1, &tbh);
        put_bh(tbh);
    }
    atomic_dec(&journal->j_async_throttle);

    /* wait on everything written so far before writing the commit
     * if we are in barrier mode, send the commit down now
     */
    barrier = reiserfs_barrier_flush(s);
    if (barrier) {
        int ret;
        lock_buffer(jl->j_commit_bh);
        ret = submit_barrier_buffer(jl->j_commit_bh);
        if (ret == -EOPNOTSUPP) {
            set_buffer_uptodate(jl->j_commit_bh);
            disable_barrier(s);
            barrier = 0;
        }
    }
    for (i = 0; i < (jl->j_len + 1); i++) {
        bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
            (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
        tbh = journal_find_get_block(s, bn);
        wait_on_buffer(tbh);
        // since we're using ll_rw_block() above, it might have skipped
        // over a locked buffer.  Double check here
        //
        if (buffer_dirty(tbh))          /* redundant, sync_dirty_buffer() checks */
            sync_dirty_buffer(tbh);
        if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
            reiserfs_warning(s, "journal-601, buffer write failed");
#endif
            retval = -EIO;
        }
        put_bh(tbh);    /* once for journal_find_get_block */
        put_bh(tbh);    /* once due to original getblk in do_journal_end */
        atomic_dec(&(jl->j_commit_left));
    }

    BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

    if (!barrier) {
        if (buffer_dirty(jl->j_commit_bh))
            BUG();
        mark_buffer_dirty(jl->j_commit_bh);
        sync_dirty_buffer(jl->j_commit_bh);
    } else
        wait_on_buffer(jl->j_commit_bh);

    check_barrier_completion(s, jl->j_commit_bh);

    /* If there was a write error in the journal - we can't commit this
     * transaction - it will be invalid and, if successful, will just end
     * up propagating the write error out to the filesystem. */
    if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
        reiserfs_warning(s, "journal-615: buffer write failed");
#endif
        retval = -EIO;
    }
    bforget(jl->j_commit_bh);
    if (journal->j_last_commit_id != 0 &&
        (jl->j_trans_id - journal->j_last_commit_id) != 1) {
        reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                         journal->j_last_commit_id, jl->j_trans_id);
    }
    journal->j_last_commit_id = jl->j_trans_id;

    /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
    cleanup_freed_for_journal_list(s, jl);

    retval = retval ? retval : journal->j_errno;

    /* mark the metadata dirty */
    if (!retval)
        dirty_one_transaction(s, jl);
    atomic_dec(&(jl->j_commit_left));

    if (flushall) {
        atomic_set(&(jl->j_older_commits_done), 1);
    }
    up(&jl->j_commit_lock);
  put_jl:
    put_journal_list(s, jl);

    if (retval)
        reiserfs_abort(s, retval, "Journal write error in %s",
                       __FUNCTION__);
    put_fs_excl();
    return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
                                                          reiserfs_journal_cnode
                                                          *cn)
{
    struct super_block *sb = cn->sb;
    b_blocknr_t blocknr = cn->blocknr;

    cn = cn->hprev;
    while (cn) {
        if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
            return cn->jlist;
        }
        cn = cn->hprev;
    }
    return NULL;
}

static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
                                int);
/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
                                         struct reiserfs_journal_list *jl,
                                         int debug)
{
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_journal_cnode *cn, *last;
    cn = jl->j_realblock;

    /* which is better, to lock once around the whole loop, or
    ** to lock for each call to remove_journal_hash?
    */
    while (cn) {
        if (cn->blocknr != 0) {
            if (debug) {
                reiserfs_warning(p_s_sb,
                                 "block %u, bh is %d, state %ld",
                                 cn->blocknr, cn->bh ? 1 : 0,
                                 cn->state);
            }
            cn->state = 0;
            remove_journal_hash(p_s_sb, journal->j_list_hash_table,
                                jl, cn->blocknr, 1);
        }
        last = cn;
        cn = cn->next;
        free_cnode(p_s_sb, last);
    }
    jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
                                        unsigned long offset,
                                        unsigned long trans_id)
{
    struct reiserfs_journal_header *jh;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    if (reiserfs_is_journal_aborted(journal))
        return -EIO;

    if (trans_id >= journal->j_last_flush_trans_id) {
        if (buffer_locked((journal->j_header_bh))) {
            wait_on_buffer((journal->j_header_bh));
            if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                reiserfs_warning(p_s_sb,
                                 "journal-699: buffer write failed");
#endif
                return -EIO;
            }
        }
        journal->j_last_flush_trans_id = trans_id;
        journal->j_first_unflushed_offset = offset;
        jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
        jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
        jh->j_first_unflushed_offset = cpu_to_le32(offset);
        jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

        if (reiserfs_barrier_flush(p_s_sb)) {
            int ret;
            lock_buffer(journal->j_header_bh);
            ret = submit_barrier_buffer(journal->j_header_bh);
            if (ret == -EOPNOTSUPP) {
                set_buffer_uptodate(journal->j_header_bh);
                disable_barrier(p_s_sb);
                goto sync;
            }
            wait_on_buffer(journal->j_header_bh);
            check_barrier_completion(p_s_sb, journal->j_header_bh);
        } else {
          sync:
            set_buffer_dirty(journal->j_header_bh);
            sync_dirty_buffer(journal->j_header_bh);
        }
        if (!buffer_uptodate(journal->j_header_bh)) {
            reiserfs_warning(p_s_sb,
                             "journal-837: IO error during journal replay");
            return -EIO;
        }
    }
    return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id)
{
    return _update_journal_header_block(p_s_sb, offset, trans_id);
}
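/*
** worked example of the offset passed in by flush_journal_list: a
** transaction occupies j_len + 2 log blocks (description block, j_len
** logged blocks, commit block), so once it is fully flushed the header is
** pointed at (j_start + j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), the first
** block of the next transaction in the ring.
*/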
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
    struct list_head *entry;
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    unsigned long trans_id = jl->j_trans_id;

    /* we know we are the only ones flushing things, no extra race
     * protection is required.
     */
  restart:
    entry = journal->j_journal_list.next;
    /* Did we wrap? */
    if (entry == &journal->j_journal_list)
        return 0;
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (other_jl->j_trans_id < trans_id) {
        BUG_ON(other_jl->j_refcount <= 0);
        /* do not flush all */
        flush_journal_list(p_s_sb, other_jl, 0);

        /* other_jl is now deleted from the list */
        goto restart;
    }
    return 0;
}

static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    if (!list_empty(&jl->j_working_list)) {
        list_del_init(&jl->j_working_list);
        journal->j_num_work_lists--;
    }
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall)
{
    struct reiserfs_journal_list *pjl;
    struct reiserfs_journal_cnode *cn, *last;
    int count;
    int was_jwait = 0;
    int was_dirty = 0;
    struct buffer_head *saved_bh;
    unsigned long j_len_saved = jl->j_len;
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    int err = 0;

    BUG_ON(j_len_saved <= 0);

    if (atomic_read(&journal->j_wcount) != 0) {
        reiserfs_warning(s,
                         "clm-2048: flush_journal_list called with wcount %d",
                         atomic_read(&journal->j_wcount));
    }
    BUG_ON(jl->j_trans_id == 0);

    /* if flushall == 0, the lock is already held */
    if (flushall) {
        down(&journal->j_flush_sem);
    } else if (!down_trylock(&journal->j_flush_sem)) {
        BUG();
    }

    count = 0;
    if (j_len_saved > journal->j_trans_max) {
        reiserfs_panic(s,
                       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
                       j_len_saved, jl->j_trans_id);
        return 0;
    }

    get_fs_excl();

    /* if all the work is already done, get out of here */
    if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
        atomic_read(&(jl->j_commit_left)) <= 0) {
        goto flush_older_and_return;
    }

    /* start by putting the commit list on disk.  This will also flush
    ** the commit lists of any older transactions
    */
    flush_commit_list(s, jl, 1);

    if (!(jl->j_state & LIST_DIRTY)
        && !reiserfs_is_journal_aborted(journal))
        BUG();

    /* are we done now? */
    if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
        atomic_read(&(jl->j_commit_left)) <= 0) {
        goto flush_older_and_return;
    }

    /* loop through each cnode, see if we need to write it,
    ** or wait on a more recent transaction, or just ignore it
    */
    if (atomic_read(&(journal->j_wcount)) != 0) {
        reiserfs_panic(s,
                       "journal-844: panic journal list is flushing, wcount is not 0\n");
    }
    cn = jl->j_realblock;
    while (cn) {
        was_jwait = 0;
        was_dirty = 0;
        saved_bh = NULL;
        /* blocknr of 0 is no longer in the hash, ignore it */
        if (cn->blocknr == 0) {
            goto free_cnode;
        }

        /* This transaction failed commit.  Don't write out to the disk */
        if (!(jl->j_state & LIST_DIRTY))
            goto free_cnode;

        pjl = find_newer_jl_for_cn(cn);
        /* the order is important here.  We check pjl to make sure we
        ** don't clear BH_JDirty_wait if we aren't the one writing this
        ** block to disk
        */
        if (!pjl && cn->bh) {
            saved_bh = cn->bh;

            /* we do this to make sure nobody releases the buffer while
            ** we are working with it
            */
            get_bh(saved_bh);

            if (buffer_journal_dirty(saved_bh)) {
                BUG_ON(!can_dirty(cn));
                was_jwait = 1;
                was_dirty = 1;
            } else if (can_dirty(cn)) {
                /* everything with !pjl && jwait should be writable */
                BUG();
            }
        }

        /* if someone has this block in a newer transaction, just make
        ** sure they are committed, and don't try writing it to disk
        */
        if (pjl) {
            if (atomic_read(&pjl->j_commit_left))
                flush_commit_list(s, pjl, 1);
            goto free_cnode;
        }

        /* bh == NULL when the block got to disk on its own, OR,
        ** the block got freed in a future transaction
        */
        if (saved_bh == NULL) {
            goto free_cnode;
        }

        /* this should never happen.  kupdate_one_transaction has this list
        ** locked while it works, so we should never see a buffer here that
        ** is not marked JDirty_wait
        */
        if ((!was_jwait) && !buffer_locked(saved_bh)) {
            reiserfs_warning(s,
                             "journal-813: BAD! buffer %llu %cdirty %cjwait, "
                             "not in a newer transaction",
                             (unsigned long long)saved_bh->b_blocknr,
                             was_dirty ? ' ' : '!',
                             was_jwait ? ' ' : '!');
        }
        if (was_dirty) {
            /* we inc again because saved_bh gets decremented at free_cnode */
            get_bh(saved_bh);
            set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
            lock_buffer(saved_bh);
            BUG_ON(cn->blocknr != saved_bh->b_blocknr);
            if (buffer_dirty(saved_bh))
                submit_logged_buffer(saved_bh);
            else
                unlock_buffer(saved_bh);
            count++;
        } else {
            reiserfs_warning(s,
                             "clm-2082: Unable to flush buffer %llu in %s",
                             (unsigned long long)saved_bh->b_blocknr,
                             __FUNCTION__);
        }
      free_cnode:
        last = cn;
        cn = cn->next;
        if (saved_bh) {
            /* we incremented this to keep others from taking the buffer head away */
            put_bh(saved_bh);
            if (atomic_read(&(saved_bh->b_count)) < 0) {
                reiserfs_warning(s,
                                 "journal-945: saved_bh->b_count < 0");
            }
        }
    }
    if (count > 0) {
        cn = jl->j_realblock;
        while (cn) {
            if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
                if (!cn->bh) {
                    reiserfs_panic(s,
                                   "journal-1011: cn->bh is NULL\n");
                }
                wait_on_buffer(cn->bh);
                if (!cn->bh) {
                    reiserfs_panic(s,
                                   "journal-1012: cn->bh is NULL\n");
                }
                if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
                    reiserfs_warning(s,
                                     "journal-949: buffer write failed\n");
#endif
                    err = -EIO;
                }
                /* note, we must clear the JDirty_wait bit after the up to date
                ** check, otherwise we race against our flushpage routine
                */
                BUG_ON(!test_clear_buffer_journal_dirty(cn->bh));

                /* undo the inc from journal_mark_dirty */
                put_bh(cn->bh);
                brelse(cn->bh);
            }
            cn = cn->next;
        }
    }

    if (err)
        reiserfs_abort(s, -EIO,
                       "Write error while pushing transaction to disk in %s",
                       __FUNCTION__);
  flush_older_and_return:
    /* before we can update the journal header block, we _must_ flush all
    ** real blocks from all older transactions to disk.  This is because
    ** once the header block is updated, this transaction will not be
    ** replayed after a crash
    */
    if (flushall) {
        flush_older_journal_lists(s, jl);
    }

    err = journal->j_errno;
    /* before we can remove everything from the hash tables for this
    ** transaction, we must make sure it can never be replayed
    **
    ** since we are only called from do_journal_end, we know for sure there
    ** are no allocations going on while we are flushing journal lists.  So,
    ** we only need to update the journal header block for the last list
    ** being flushed
    */
    if (!err && flushall) {
        err = update_journal_header_block(s,
                                          (jl->j_start + jl->j_len +
                                           2) % SB_ONDISK_JOURNAL_SIZE(s),
                                          jl->j_trans_id);
        if (err)
            reiserfs_abort(s, -EIO,
                           "Write error while updating journal header in %s",
                           __FUNCTION__);
    }
    remove_all_from_journal_list(s, jl, 0);
    list_del_init(&jl->j_list);
    journal->j_num_lists--;
    del_from_work_list(s, jl);

    if (journal->j_last_flush_id != 0 &&
        (jl->j_trans_id - journal->j_last_flush_id) != 1) {
        reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
                         journal->j_last_flush_id, jl->j_trans_id);
    }
    journal->j_last_flush_id = jl->j_trans_id;

    /* not strictly required since we are freeing the list, but it should
     * help find code using dead lists later on
     */
    jl->j_len = 0;
    atomic_set(&(jl->j_nonzerolen), 0);
    jl->j_start = 0;
    jl->j_realblock = NULL;
    jl->j_commit_bh = NULL;
    jl->j_trans_id = 0;
    jl->j_state = 0;
    put_journal_list(s, jl);
    if (flushall)
        up(&journal->j_flush_sem);
    put_fs_excl();
    return err;
}

static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
{
    struct reiserfs_journal_cnode *cn;
    int ret = 0;

    jl->j_state |= LIST_TOUCHED;
    del_from_work_list(s, jl);
    if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
        return 0;
    }

    cn = jl->j_realblock;
    while (cn) {
        /* if the blocknr == 0, this has been cleared from the hash,
        ** skip it
        */
        if (cn->blocknr == 0) {
            goto next;
        }
        if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
            struct buffer_head *tmp_bh;
            /* we can race against journal_mark_freed when we try
             * to lock_buffer(cn->bh), so we have to inc the buffer
             * count, and recheck things after locking
             */
            tmp_bh = cn->bh;
            get_bh(tmp_bh);
            lock_buffer(tmp_bh);
            if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
                if (!buffer_journal_dirty(tmp_bh) ||
                    buffer_journal_prepared(tmp_bh))
                    BUG();
                add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
                ret++;
            } else {
                /* note, cn->bh might be null now */
                unlock_buffer(tmp_bh);
            }
            put_bh(tmp_bh);
        }
      next:
        cn = cn->next;
        cond_resched();
    }
    return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal_list *pjl;
    int ret = 0;

    jl->j_state |= LIST_DIRTY;
    cn = jl->j_realblock;
    while (cn) {
        /* look for a more recent transaction that logged this
        ** buffer.  Only the most recent transaction with a buffer in
        ** it is allowed to send that buffer to disk
        */
        pjl = find_newer_jl_for_cn(cn);
        if (!pjl && cn->blocknr && cn->bh
            && buffer_journal_dirty(cn->bh)) {
            BUG_ON(!can_dirty(cn));
            /* if the buffer is prepared, it will either be logged
             * or restored.  If restored, we need to make sure
             * it actually gets marked dirty
             */
            clear_buffer_journal_new(cn->bh);
            if (buffer_journal_prepared(cn->bh)) {
                set_buffer_journal_restore_dirty(cn->bh);
            } else {
                set_buffer_journal_test(cn->bh);
                mark_buffer_dirty(cn->bh);
            }
        }
        cn = cn->next;
    }
    return ret;
}

static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned long *next_trans_id,
                                int num_blocks, int num_trans)
{
    int ret = 0;
    int written = 0;
    int transactions_flushed = 0;
    unsigned long orig_trans_id = jl->j_trans_id;
    struct buffer_chunk chunk;
    struct list_head *entry;
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    chunk.nr = 0;

    down(&journal->j_flush_sem);
    if (!journal_list_still_alive(s, orig_trans_id)) {
        goto done;
    }

    /* we've got j_flush_sem held, nobody is going to delete any
     * of these lists out from underneath us
     */
    while ((num_trans && transactions_flushed < num_trans) ||
           (!num_trans && written < num_blocks)) {

        if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
            atomic_read(&jl->j_commit_left)
            || !(jl->j_state & LIST_DIRTY)) {
            del_from_work_list(s, jl);
            break;
        }
        ret = write_one_transaction(s, jl, &chunk);

        if (ret < 0)
            goto done;
        transactions_flushed++;
        written += ret;
        entry = jl->j_list.next;

        /* did we wrap? */
        if (entry == &journal->j_journal_list) {
            break;
        }
        jl = JOURNAL_LIST_ENTRY(entry);

        /* don't bother with older transactions */
        if (jl->j_trans_id <= orig_trans_id)
            break;
    }
    if (chunk.nr) {
        write_chunk(&chunk);
    }

  done:
    up(&journal->j_flush_sem);
    return ret;
}
1717** 1718** This function tries to clear out a large chunk of the journal lists 1719** at once, which makes everything faster since only the newest journal 1720** list updates the header block 1721*/ 1722static int flush_used_journal_lists(struct super_block *s, 1723 struct reiserfs_journal_list *jl) 1724{ 1725 unsigned long len = 0; 1726 unsigned long cur_len; 1727 int ret; 1728 int i; 1729 int limit = 256; 1730 struct reiserfs_journal_list *tjl; 1731 struct reiserfs_journal_list *flush_jl; 1732 unsigned long trans_id; 1733 struct reiserfs_journal *journal = SB_JOURNAL(s); 1734 1735 flush_jl = tjl = jl; 1736 1737 /* in data logging mode, try harder to flush a lot of blocks */ 1738 if (reiserfs_data_log(s)) 1739 limit = 1024; 1740 /* flush for 256 transactions or limit blocks, whichever comes first */ 1741 for (i = 0; i < 256 && len < limit; i++) { 1742 if (atomic_read(&tjl->j_commit_left) || 1743 tjl->j_trans_id < jl->j_trans_id) { 1744 break; 1745 } 1746 cur_len = atomic_read(&tjl->j_nonzerolen); 1747 if (cur_len > 0) { 1748 tjl->j_state &= ~LIST_TOUCHED; 1749 } 1750 len += cur_len; 1751 flush_jl = tjl; 1752 if (tjl->j_list.next == &journal->j_journal_list) 1753 break; 1754 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); 1755 } 1756 /* try to find a group of blocks we can flush across all the 1757 ** transactions, but only bother if we've actually spanned 1758 ** across multiple lists 1759 */ 1760 if (flush_jl != jl) { 1761 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1762 } 1763 flush_journal_list(s, flush_jl, 1); 1764 return 0; 1765} 1766 1767/* 1768** removes any nodes in table with name block and dev as bh. 1769** only touchs the hnext and hprev pointers. 1770*/ 1771void remove_journal_hash(struct super_block *sb, 1772 struct reiserfs_journal_cnode **table, 1773 struct reiserfs_journal_list *jl, 1774 unsigned long block, int remove_freed) 1775{ 1776 struct reiserfs_journal_cnode *cur; 1777 struct reiserfs_journal_cnode **head; 1778 1779 head = &(journal_hash(table, sb, block)); 1780 if (!head) { 1781 return; 1782 } 1783 cur = *head; 1784 while (cur) { 1785 if (cur->blocknr == block && cur->sb == sb 1786 && (jl == NULL || jl == cur->jlist) 1787 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) { 1788 if (cur->hnext) { 1789 cur->hnext->hprev = cur->hprev; 1790 } 1791 if (cur->hprev) { 1792 cur->hprev->hnext = cur->hnext; 1793 } else { 1794 *head = cur->hnext; 1795 } 1796 cur->blocknr = 0; 1797 cur->sb = NULL; 1798 cur->state = 0; 1799 if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */ 1800 atomic_dec(&(cur->jlist->j_nonzerolen)); 1801 cur->bh = NULL; 1802 cur->jlist = NULL; 1803 } 1804 cur = cur->hnext; 1805 } 1806} 1807 1808static void free_journal_ram(struct super_block *p_s_sb) 1809{ 1810 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 1811 reiserfs_kfree(journal->j_current_jl, 1812 sizeof(struct reiserfs_journal_list), p_s_sb); 1813 journal->j_num_lists--; 1814 1815 vfree(journal->j_cnode_free_orig); 1816 free_list_bitmaps(p_s_sb, journal->j_list_bitmap); 1817 free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */ 1818 if (journal->j_header_bh) { 1819 brelse(journal->j_header_bh); 1820 } 1821 /* j_header_bh is on the journal dev, make sure not to release the journal 1822 * dev until we brelse j_header_bh 1823 */ 1824 release_journal_dev(p_s_sb, journal); 1825 vfree(journal); 1826} 1827 1828/* 1829** call on unmount. Only set error to 1 if you haven't made your way out 1830** of read_super() yet. 
/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb, int error)
{
    struct reiserfs_transaction_handle myth;
    int flushed = 0;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    /* we only want to flush out transactions if we were called with error == 0
     */
    if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
        /* end the current trans */
        BUG_ON(!th->t_trans_id);
        do_journal_end(th, p_s_sb, 10, FLUSH_ALL);

        /* make sure something gets logged to force our way into the flush code */
        if (!journal_join(&myth, p_s_sb, 1)) {
            reiserfs_prepare_for_journal(p_s_sb,
                                         SB_BUFFER_WITH_SB(p_s_sb), 1);
            journal_mark_dirty(&myth, p_s_sb,
                               SB_BUFFER_WITH_SB(p_s_sb));
            do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
            flushed = 1;
        }
    }

    /* this also catches errors during the do_journal_end above */
    if (!error && reiserfs_is_journal_aborted(journal)) {
        memset(&myth, 0, sizeof(myth));
        if (!journal_join_abort(&myth, p_s_sb, 1)) {
            reiserfs_prepare_for_journal(p_s_sb,
                                         SB_BUFFER_WITH_SB(p_s_sb), 1);
            journal_mark_dirty(&myth, p_s_sb,
                               SB_BUFFER_WITH_SB(p_s_sb));
            do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
        }
    }

    reiserfs_mounted_fs_count--;
    /* wait for all commits to finish */
    cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
    flush_workqueue(commit_wq);
    if (!reiserfs_mounted_fs_count) {
        destroy_workqueue(commit_wq);
        commit_wq = NULL;
    }

    free_journal_ram(p_s_sb);

    return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
                    struct super_block *p_s_sb)
{
    return do_journal_release(th, p_s_sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
                          struct super_block *p_s_sb)
{
    return do_journal_release(th, p_s_sb, 1);
}
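/*
** replay validation note: a description block at offset X is paired with
** the commit block expected at (X + get_desc_trans_len(desc) + 1) modulo
** the journal size; journal_compare_desc_commit below then insists the two
** carry the same trans_id and the same sane length (0 < len <= j_trans_max)
** before the transaction is trusted.
*/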
/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb,
                                       struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit)
{
    if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
        get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
        get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
        get_commit_trans_len(commit) <= 0) {
        return 1;
    }
    return 0;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb,
                                        struct buffer_head *d_bh,
                                        unsigned long *oldest_invalid_trans_id,
                                        unsigned long *newest_mount_id)
{
    struct reiserfs_journal_desc *desc;
    struct reiserfs_journal_commit *commit;
    struct buffer_head *c_bh;
    unsigned long offset;

    if (!d_bh)
        return 0;

    desc = (struct reiserfs_journal_desc *)d_bh->b_data;
    if (get_desc_trans_len(desc) > 0
        && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
        if (oldest_invalid_trans_id && *oldest_invalid_trans_id
            && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
            reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                           "journal-986: transaction "
                           "is valid returning because trans_id %d is greater than "
                           "oldest_invalid %lu",
                           get_desc_trans_id(desc),
                           *oldest_invalid_trans_id);
            return 0;
        }
        if (newest_mount_id
            && *newest_mount_id > get_desc_mount_id(desc)) {
            reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                           "journal-1087: transaction "
                           "is valid returning because mount_id %d is less than "
                           "newest_mount_id %lu",
                           get_desc_mount_id(desc),
                           *newest_mount_id);
            return -1;
        }
        if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
            reiserfs_warning(p_s_sb,
                             "journal-2018: Bad transaction length %d encountered, ignoring transaction",
                             get_desc_trans_len(desc));
            return -1;
        }
        offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);

        /* ok, we have a journal description block, let's see if the transaction was valid */
        c_bh = journal_bread(p_s_sb,
                             SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                             ((offset + get_desc_trans_len(desc) +
                               1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
        if (!c_bh)
            return 0;
        commit = (struct reiserfs_journal_commit *)c_bh->b_data;
        if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
            reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                           "journal_transaction_is_valid, commit offset %ld had bad "
                           "time %d or length %d",
                           c_bh->b_blocknr -
                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                           get_commit_trans_id(commit),
                           get_commit_trans_len(commit));
            brelse(c_bh);
            if (oldest_invalid_trans_id) {
                *oldest_invalid_trans_id = get_desc_trans_id(desc);
                reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                               "journal-1004: "
                               "transaction_is_valid setting oldest invalid trans_id "
                               "to %d",
                               get_desc_trans_id(desc));
            }
            return -1;
        }
        brelse(c_bh);
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                       "journal-1006: found valid "
                       "transaction start offset %llu, len %d id %d",
                       d_bh->b_blocknr -
                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       get_desc_trans_len(desc),
                       get_desc_trans_id(desc));
        return 1;
} else { 2004 return 0; 2005 } 2006} 2007 2008static void brelse_array(struct buffer_head **heads, int num) 2009{ 2010 int i; 2011 for (i = 0; i < num; i++) { 2012 brelse(heads[i]); 2013 } 2014} 2015 2016/* 2017** given the start, and values for the oldest acceptable transactions, 2018** this either reads in and replays a transaction, or returns because the transaction 2019** is invalid or too old. 2020*/ 2021static int journal_read_transaction(struct super_block *p_s_sb, 2022 unsigned long cur_dblock, 2023 unsigned long oldest_start, 2024 unsigned long oldest_trans_id, 2025 unsigned long newest_mount_id) 2026{ 2027 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 2028 struct reiserfs_journal_desc *desc; 2029 struct reiserfs_journal_commit *commit; 2030 unsigned long trans_id = 0; 2031 struct buffer_head *c_bh; 2032 struct buffer_head *d_bh; 2033 struct buffer_head **log_blocks = NULL; 2034 struct buffer_head **real_blocks = NULL; 2035 unsigned long trans_offset; 2036 int i; 2037 int trans_half; 2038 2039 d_bh = journal_bread(p_s_sb, cur_dblock); 2040 if (!d_bh) 2041 return 1; 2042 desc = (struct reiserfs_journal_desc *)d_bh->b_data; 2043 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); 2044 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: " 2045 "journal_read_transaction, offset %llu, len %d mount_id %d", 2046 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 2047 get_desc_trans_len(desc), get_desc_mount_id(desc)); 2048 if (get_desc_trans_id(desc) < oldest_trans_id) { 2049 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: " 2050 "journal_read_trans skipping because %lu is too old", 2051 cur_dblock - 2052 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)); 2053 brelse(d_bh); 2054 return 1; 2055 } 2056 if (get_desc_mount_id(desc) != newest_mount_id) { 2057 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: " 2058 "journal_read_trans skipping because %d is != " 2059 "newest_mount_id %lu", get_desc_mount_id(desc), 2060 newest_mount_id); 2061 brelse(d_bh); 2062 return 1; 2063 } 2064 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2065 ((trans_offset + get_desc_trans_len(desc) + 1) % 2066 SB_ONDISK_JOURNAL_SIZE(p_s_sb))); 2067 if (!c_bh) { 2068 brelse(d_bh); 2069 return 1; 2070 } 2071 commit = (struct reiserfs_journal_commit *)c_bh->b_data; 2072 if (journal_compare_desc_commit(p_s_sb, desc, commit)) { 2073 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2074 "journal_read_transaction, " 2075 "commit offset %llu had bad time %d or length %d", 2076 c_bh->b_blocknr - 2077 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 2078 get_commit_trans_id(commit), 2079 get_commit_trans_len(commit)); 2080 brelse(c_bh); 2081 brelse(d_bh); 2082 return 1; 2083 } 2084 trans_id = get_desc_trans_id(desc); 2085 /* now we know we've got a good transaction, and it was inside the valid time ranges */ 2086 log_blocks = 2087 reiserfs_kmalloc(get_desc_trans_len(desc) * 2088 sizeof(struct buffer_head *), GFP_NOFS, p_s_sb); 2089 real_blocks = 2090 reiserfs_kmalloc(get_desc_trans_len(desc) * 2091 sizeof(struct buffer_head *), GFP_NOFS, p_s_sb); 2092 if (!log_blocks || !real_blocks) { 2093 brelse(c_bh); 2094 brelse(d_bh); 2095 reiserfs_kfree(log_blocks, 2096 get_desc_trans_len(desc) * 2097 sizeof(struct buffer_head *), p_s_sb); 2098 reiserfs_kfree(real_blocks, 2099 get_desc_trans_len(desc) * 2100 sizeof(struct buffer_head *), p_s_sb); 2101 reiserfs_warning(p_s_sb, 2102 "journal-1169: kmalloc failed, unable to mount FS"); 2103 return -1; 2104 } 2105 /* get all the buffer heads */
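/* illustrative aside (not from the original source): replay splits the
** destination block numbers between the two metadata blocks. With a 4k
** blocksize, journal_trans_half() works out to 1018 entries, so below,
** i < 1018 takes its target from desc->j_realblock[i] and anything past
** that comes from commit->j_realblock[i - 1018].
*/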
2106 trans_half = journal_trans_half(p_s_sb->s_blocksize); 2107 for (i = 0; i < get_desc_trans_len(desc); i++) { 2108 log_blocks[i] = 2109 journal_getblk(p_s_sb, 2110 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2111 (trans_offset + 1 + 2112 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)); 2113 if (i < trans_half) { 2114 real_blocks[i] = 2115 sb_getblk(p_s_sb, 2116 le32_to_cpu(desc->j_realblock[i])); 2117 } else { 2118 real_blocks[i] = 2119 sb_getblk(p_s_sb, 2120 le32_to_cpu(commit-> 2121 j_realblock[i - trans_half])); 2122 } 2123 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) { 2124 reiserfs_warning(p_s_sb, 2125 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem"); 2126 goto abort_replay; 2127 } 2128 /* make sure we don't try to replay onto log or reserved area */ 2129 if (is_block_in_log_or_reserved_area 2130 (p_s_sb, real_blocks[i]->b_blocknr)) { 2131 reiserfs_warning(p_s_sb, 2132 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block"); 2133 abort_replay: 2134 brelse_array(log_blocks, i); 2135 brelse_array(real_blocks, i); 2136 brelse(c_bh); 2137 brelse(d_bh); 2138 reiserfs_kfree(log_blocks, 2139 get_desc_trans_len(desc) * 2140 sizeof(struct buffer_head *), p_s_sb); 2141 reiserfs_kfree(real_blocks, 2142 get_desc_trans_len(desc) * 2143 sizeof(struct buffer_head *), p_s_sb); 2144 return -1; 2145 } 2146 } 2147 /* read in the log blocks, memcpy to the corresponding real block */ 2148 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks); 2149 for (i = 0; i < get_desc_trans_len(desc); i++) { 2150 wait_on_buffer(log_blocks[i]); 2151 if (!buffer_uptodate(log_blocks[i])) { 2152 reiserfs_warning(p_s_sb, 2153 "journal-1212: REPLAY FAILURE fsck required! buffer read failed"); 2154 brelse_array(log_blocks + i, 2155 get_desc_trans_len(desc) - i); 2156 brelse_array(real_blocks, get_desc_trans_len(desc)); 2157 brelse(c_bh); 2158 brelse(d_bh); 2159 reiserfs_kfree(log_blocks, 2160 get_desc_trans_len(desc) * 2161 sizeof(struct buffer_head *), p_s_sb); 2162 reiserfs_kfree(real_blocks, 2163 get_desc_trans_len(desc) * 2164 sizeof(struct buffer_head *), p_s_sb); 2165 return -1; 2166 } 2167 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, 2168 real_blocks[i]->b_size); 2169 set_buffer_uptodate(real_blocks[i]); 2170 brelse(log_blocks[i]); 2171 } 2172 /* flush out the real blocks */ 2173 for (i = 0; i < get_desc_trans_len(desc); i++) { 2174 set_buffer_dirty(real_blocks[i]); 2175 ll_rw_block(SWRITE, 1, real_blocks + i); 2176 } 2177 for (i = 0; i < get_desc_trans_len(desc); i++) { 2178 wait_on_buffer(real_blocks[i]); 2179 if (!buffer_uptodate(real_blocks[i])) { 2180 reiserfs_warning(p_s_sb, 2181 "journal-1226: REPLAY FAILURE, fsck required! 
buffer write failed"); 2182 brelse_array(real_blocks + i, 2183 get_desc_trans_len(desc) - i); 2184 brelse(c_bh); 2185 brelse(d_bh); 2186 reiserfs_kfree(log_blocks, 2187 get_desc_trans_len(desc) * 2188 sizeof(struct buffer_head *), p_s_sb); 2189 reiserfs_kfree(real_blocks, 2190 get_desc_trans_len(desc) * 2191 sizeof(struct buffer_head *), p_s_sb); 2192 return -1; 2193 } 2194 brelse(real_blocks[i]); 2195 } 2196 cur_dblock = 2197 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2198 ((trans_offset + get_desc_trans_len(desc) + 2199 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)); 2200 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2201 "journal-1095: setting journal " "start to offset %ld", 2202 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)); 2203 2204 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */ 2205 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); 2206 journal->j_last_flush_trans_id = trans_id; 2207 journal->j_trans_id = trans_id + 1; 2208 brelse(c_bh); 2209 brelse(d_bh); 2210 reiserfs_kfree(log_blocks, 2211 le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), 2212 p_s_sb); 2213 reiserfs_kfree(real_blocks, 2214 le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), 2215 p_s_sb); 2216 return 0; 2217} 2218 2219/* This function reads blocks starting from block and to max_block of bufsize 2220 size (but no more than BUFNR blocks at a time). This proved to improve 2221 mounting speed on self-rebuilding raid5 arrays at least. 2222 Right now it is only used from journal code. But later we might use it 2223 from other places. 2224 Note: Do not use journal_getblk/sb_getblk functions here! */ 2225static struct buffer_head *reiserfs_breada(struct block_device *dev, int block, 2226 int bufsize, unsigned int max_block) 2227{ 2228 struct buffer_head *bhlist[BUFNR]; 2229 unsigned int blocks = BUFNR; 2230 struct buffer_head *bh; 2231 int i, j; 2232 2233 bh = __getblk(dev, block, bufsize); 2234 if (buffer_uptodate(bh)) 2235 return (bh); 2236 2237 if (block + BUFNR > max_block) { 2238 blocks = max_block - block; 2239 } 2240 bhlist[0] = bh; 2241 j = 1; 2242 for (i = 1; i < blocks; i++) { 2243 bh = __getblk(dev, block + i, bufsize); 2244 if (buffer_uptodate(bh)) { 2245 brelse(bh); 2246 break; 2247 } else 2248 bhlist[j++] = bh; 2249 } 2250 ll_rw_block(READ, j, bhlist); 2251 for (i = 1; i < j; i++) 2252 brelse(bhlist[i]); 2253 bh = bhlist[0]; 2254 wait_on_buffer(bh); 2255 if (buffer_uptodate(bh)) 2256 return bh; 2257 brelse(bh); 2258 return NULL; 2259} 2260 2261/* 2262** read and replay the log 2263** on a clean unmount, the journal header's next unflushed pointer will be to an invalid 2264** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast. 2265** 2266** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid. 2267** 2268** On exit, it sets things up so the first transaction will work correctly. 
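**
** worked example (not from the original): if the header records
** j_first_unflushed_offset == 500 and j_last_flush_trans_id == 120,
** the fast path probes journal block 500 for a desc block carrying
** trans_id 121; if that probe fails validation, the log is clean and
** the full scan is skipped entirely.
**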
2269*/ 2270static int journal_read(struct super_block *p_s_sb) 2271{ 2272 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 2273 struct reiserfs_journal_desc *desc; 2274 unsigned long oldest_trans_id = 0; 2275 unsigned long oldest_invalid_trans_id = 0; 2276 time_t start; 2277 unsigned long oldest_start = 0; 2278 unsigned long cur_dblock = 0; 2279 unsigned long newest_mount_id = 9; 2280 struct buffer_head *d_bh; 2281 struct reiserfs_journal_header *jh; 2282 int valid_journal_header = 0; 2283 int replay_count = 0; 2284 int continue_replay = 1; 2285 int ret; 2286 char b[BDEVNAME_SIZE]; 2287 2288 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); 2289 reiserfs_info(p_s_sb, "checking transaction log (%s)\n", 2290 bdevname(journal->j_dev_bd, b)); 2291 start = get_seconds(); 2292 2293 /* step 1, read in the journal header block. Check the transaction it says 2294 ** is the first unflushed, and if that transaction is not valid, 2295 ** replay is done 2296 */ 2297 journal->j_header_bh = journal_bread(p_s_sb, 2298 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) 2299 + SB_ONDISK_JOURNAL_SIZE(p_s_sb)); 2300 if (!journal->j_header_bh) { 2301 return 1; 2302 } 2303 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data); 2304 if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 && 2305 le32_to_cpu(jh->j_first_unflushed_offset) < 2306 SB_ONDISK_JOURNAL_SIZE(p_s_sb) 2307 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) { 2308 oldest_start = 2309 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2310 le32_to_cpu(jh->j_first_unflushed_offset); 2311 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; 2312 newest_mount_id = le32_to_cpu(jh->j_mount_id); 2313 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2314 "journal-1153: found in " 2315 "header: first_unflushed_offset %d, last_flushed_trans_id " 2316 "%lu", le32_to_cpu(jh->j_first_unflushed_offset), 2317 le32_to_cpu(jh->j_last_flush_trans_id)); 2318 valid_journal_header = 1; 2319 2320 /* now, we try to read the first unflushed offset. If it is not valid, 2321 ** there is nothing more we can do, and it makes no sense to read 2322 ** through the whole log. 2323 */ 2324 d_bh = 2325 journal_bread(p_s_sb, 2326 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2327 le32_to_cpu(jh->j_first_unflushed_offset)); 2328 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL); 2329 if (!ret) { 2330 continue_replay = 0; 2331 } 2332 brelse(d_bh); 2333 goto start_log_replay; 2334 } 2335 2336 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) { 2337 reiserfs_warning(p_s_sb, 2338 "clm-2076: device is readonly, unable to replay log"); 2339 return -1; 2340 } 2341 2342 /* ok, there are transactions that need to be replayed. start with the first log block, find 2343 ** all the valid transactions, and pick out the oldest. 
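**
** worked example (not from the original): a valid transaction advances
** the scan by trans_len + 2 blocks (desc + data + commit), an invalid
** block advances it by one. If the scan meets trans_ids 122, 123 and
** then 121, oldest_start is pulled back to the block holding 121, since
** replay has to begin from the oldest surviving transaction.
**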
2344 */ 2345 while (continue_replay 2346 && cur_dblock < 2347 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2348 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) { 2349 /* Note that it is required for blocksize of primary fs device and journal 2350 device to be the same */ 2351 d_bh = 2352 reiserfs_breada(journal->j_dev_bd, cur_dblock, 2353 p_s_sb->s_blocksize, 2354 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2355 SB_ONDISK_JOURNAL_SIZE(p_s_sb)); 2356 ret = 2357 journal_transaction_is_valid(p_s_sb, d_bh, 2358 &oldest_invalid_trans_id, 2359 &newest_mount_id); 2360 if (ret == 1) { 2361 desc = (struct reiserfs_journal_desc *)d_bh->b_data; 2362 if (oldest_start == 0) { /* init all oldest_ values */ 2363 oldest_trans_id = get_desc_trans_id(desc); 2364 oldest_start = d_bh->b_blocknr; 2365 newest_mount_id = get_desc_mount_id(desc); 2366 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2367 "journal-1179: Setting " 2368 "oldest_start to offset %llu, trans_id %lu", 2369 oldest_start - 2370 SB_ONDISK_JOURNAL_1st_BLOCK 2371 (p_s_sb), oldest_trans_id); 2372 } else if (oldest_trans_id > get_desc_trans_id(desc)) { 2373 /* one we just read was older */ 2374 oldest_trans_id = get_desc_trans_id(desc); 2375 oldest_start = d_bh->b_blocknr; 2376 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2377 "journal-1180: Resetting " 2378 "oldest_start to offset %lu, trans_id %lu", 2379 oldest_start - 2380 SB_ONDISK_JOURNAL_1st_BLOCK 2381 (p_s_sb), oldest_trans_id); 2382 } 2383 if (newest_mount_id < get_desc_mount_id(desc)) { 2384 newest_mount_id = get_desc_mount_id(desc); 2385 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2386 "journal-1299: Setting " 2387 "newest_mount_id to %d", 2388 get_desc_mount_id(desc)); 2389 } 2390 cur_dblock += get_desc_trans_len(desc) + 2; 2391 } else { 2392 cur_dblock++; 2393 } 2394 brelse(d_bh); 2395 } 2396 2397 start_log_replay: 2398 cur_dblock = oldest_start; 2399 if (oldest_trans_id) { 2400 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2401 "journal-1206: Starting replay " 2402 "from offset %llu, trans_id %lu", 2403 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 2404 oldest_trans_id); 2405 2406 } 2407 replay_count = 0; 2408 while (continue_replay && oldest_trans_id > 0) { 2409 ret = 2410 journal_read_transaction(p_s_sb, cur_dblock, oldest_start, 2411 oldest_trans_id, newest_mount_id); 2412 if (ret < 0) { 2413 return ret; 2414 } else if (ret != 0) { 2415 break; 2416 } 2417 cur_dblock = 2418 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start; 2419 replay_count++; 2420 if (cur_dblock == oldest_start) 2421 break; 2422 } 2423 2424 if (oldest_trans_id == 0) { 2425 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 2426 "journal-1225: No valid " "transactions found"); 2427 } 2428 /* j_start does not get set correctly if we don't replay any transactions. 
2429 ** if we had a valid journal_header, set j_start to the first unflushed transaction value, 2430 ** copy the trans_id from the header 2431 */ 2432 if (valid_journal_header && replay_count == 0) { 2433 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset); 2434 journal->j_trans_id = 2435 le32_to_cpu(jh->j_last_flush_trans_id) + 1; 2436 journal->j_last_flush_trans_id = 2437 le32_to_cpu(jh->j_last_flush_trans_id); 2438 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1; 2439 } else { 2440 journal->j_mount_id = newest_mount_id + 1; 2441 } 2442 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " 2443 "newest_mount_id to %lu", journal->j_mount_id); 2444 journal->j_first_unflushed_offset = journal->j_start; 2445 if (replay_count > 0) { 2446 reiserfs_info(p_s_sb, 2447 "replayed %d transactions in %lu seconds\n", 2448 replay_count, get_seconds() - start); 2449 } 2450 if (!bdev_read_only(p_s_sb->s_bdev) && 2451 _update_journal_header_block(p_s_sb, journal->j_start, 2452 journal->j_last_flush_trans_id)) { 2453 /* replay failed, caller must call free_journal_ram and abort 2454 ** the mount 2455 */ 2456 return -1; 2457 } 2458 return 0; 2459} 2460 2461static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s) 2462{ 2463 struct reiserfs_journal_list *jl; 2464 retry: 2465 jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, 2466 s); 2467 if (!jl) { 2468 yield(); 2469 goto retry; 2470 } 2471 memset(jl, 0, sizeof(*jl)); 2472 INIT_LIST_HEAD(&jl->j_list); 2473 INIT_LIST_HEAD(&jl->j_working_list); 2474 INIT_LIST_HEAD(&jl->j_tail_bh_list); 2475 INIT_LIST_HEAD(&jl->j_bh_list); 2476 sema_init(&jl->j_commit_lock, 1); 2477 SB_JOURNAL(s)->j_num_lists++; 2478 get_journal_list(jl); 2479 return jl; 2480} 2481 2482static void journal_list_init(struct super_block *p_s_sb) 2483{ 2484 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb); 2485} 2486 2487static int release_journal_dev(struct super_block *super, 2488 struct reiserfs_journal *journal) 2489{ 2490 int result; 2491 2492 result = 0; 2493 2494 if (journal->j_dev_file != NULL) { 2495 result = filp_close(journal->j_dev_file, NULL); 2496 journal->j_dev_file = NULL; 2497 journal->j_dev_bd = NULL; 2498 } else if (journal->j_dev_bd != NULL) { 2499 result = blkdev_put(journal->j_dev_bd); 2500 journal->j_dev_bd = NULL; 2501 } 2502 2503 if (result != 0) { 2504 reiserfs_warning(super, 2505 "sh-457: release_journal_dev: Cannot release journal device: %i", 2506 result); 2507 } 2508 return result; 2509} 2510 2511static int journal_init_dev(struct super_block *super, 2512 struct reiserfs_journal *journal, 2513 const char *jdev_name) 2514{ 2515 int result; 2516 dev_t jdev; 2517 int blkdev_mode = FMODE_READ | FMODE_WRITE; 2518 char b[BDEVNAME_SIZE]; 2519 2520 result = 0; 2521 2522 journal->j_dev_bd = NULL; 2523 journal->j_dev_file = NULL; 2524 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ? 
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev; 2526 2527 if (bdev_read_only(super->s_bdev)) 2528 blkdev_mode = FMODE_READ; 2529 2530 /* there is no "jdev" option and journal is on separate device */ 2531 if ((!jdev_name || !jdev_name[0])) { 2532 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode); 2533 if (IS_ERR(journal->j_dev_bd)) { 2534 result = PTR_ERR(journal->j_dev_bd); 2535 journal->j_dev_bd = NULL; 2536 reiserfs_warning(super, "sh-458: journal_init_dev: " 2537 "cannot init journal device '%s': %i", 2538 __bdevname(jdev, b), result); 2539 return result; 2540 } else if (jdev != super->s_dev) 2541 set_blocksize(journal->j_dev_bd, super->s_blocksize); 2542 return 0; 2543 } 2544 2545 journal->j_dev_file = filp_open(jdev_name, 0, 0); 2546 if (!IS_ERR(journal->j_dev_file)) { 2547 struct inode *jdev_inode = journal->j_dev_file->f_mapping->host; 2548 if (!S_ISBLK(jdev_inode->i_mode)) { 2549 reiserfs_warning(super, "journal_init_dev: '%s' is " 2550 "not a block device", jdev_name); 2551 result = -ENOTBLK; 2552 release_journal_dev(super, journal); 2553 } else { 2554 /* ok */ 2555 journal->j_dev_bd = I_BDEV(jdev_inode); 2556 set_blocksize(journal->j_dev_bd, super->s_blocksize); 2557 reiserfs_info(super, 2558 "journal_init_dev: journal device: %s\n", 2559 bdevname(journal->j_dev_bd, b)); 2560 } 2561 } else { 2562 result = PTR_ERR(journal->j_dev_file); 2563 journal->j_dev_file = NULL; 2564 reiserfs_warning(super, 2565 "journal_init_dev: Cannot open '%s': %i", 2566 jdev_name, result); 2567 } 2568 return result; 2569} 2570 2571/* 2572** must be called once on fs mount. calls journal_read for you 2573*/ 2574int journal_init(struct super_block *p_s_sb, const char *j_dev_name, 2575 int old_format, unsigned int commit_max_age) 2576{ 2577 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2; 2578 struct buffer_head *bhjh; 2579 struct reiserfs_super_block *rs; 2580 struct reiserfs_journal_header *jh; 2581 struct reiserfs_journal *journal; 2582 struct reiserfs_journal_list *jl; 2583 char b[BDEVNAME_SIZE]; 2584 2585 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal)); 2586 if (!journal) { 2587 reiserfs_warning(p_s_sb, 2588 "journal-1256: unable to get memory for journal structure"); 2589 return 1; 2590 } 2591 memset(journal, 0, sizeof(struct reiserfs_journal)); 2592 INIT_LIST_HEAD(&journal->j_bitmap_nodes); 2593 INIT_LIST_HEAD(&journal->j_prealloc_list); 2594 INIT_LIST_HEAD(&journal->j_working_list); 2595 INIT_LIST_HEAD(&journal->j_journal_list); 2596 journal->j_persistent_trans = 0; 2597 if (reiserfs_allocate_list_bitmaps(p_s_sb, 2598 journal->j_list_bitmap, 2599 SB_BMAP_NR(p_s_sb))) 2600 goto free_and_return; 2601 allocate_bitmap_nodes(p_s_sb); 2602 2603 /* reserved for journal area support */ 2604 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ? 2605 REISERFS_OLD_DISK_OFFSET_IN_BYTES 2606 / p_s_sb->s_blocksize + 2607 SB_BMAP_NR(p_s_sb) + 2608 1 : 2609 REISERFS_DISK_OFFSET_IN_BYTES / 2610 p_s_sb->s_blocksize + 2); 2611 2612 /* Sanity check to see if the standard journal fits within the first bitmap 2613 (relevant for small blocksizes) */ 2614 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) && 2615 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + 2616 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) { 2617 reiserfs_warning(p_s_sb, 2618 "journal-1393: journal does not fit into the area " 2619 "addressed by the first bitmap block. It starts at " 2620 "%u and its size is %u. 
Block size %ld", 2621 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb), 2622 SB_ONDISK_JOURNAL_SIZE(p_s_sb), 2623 p_s_sb->s_blocksize); 2624 goto free_and_return; 2625 } 2626 2627 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) { 2628 reiserfs_warning(p_s_sb, 2629 "sh-462: unable to initialize journal device"); 2630 goto free_and_return; 2631 } 2632 2633 rs = SB_DISK_SUPER_BLOCK(p_s_sb); 2634 2635 /* read journal header */ 2636 bhjh = journal_bread(p_s_sb, 2637 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 2638 SB_ONDISK_JOURNAL_SIZE(p_s_sb)); 2639 if (!bhjh) { 2640 reiserfs_warning(p_s_sb, 2641 "sh-459: unable to read journal header"); 2642 goto free_and_return; 2643 } 2644 jh = (struct reiserfs_journal_header *)(bhjh->b_data); 2645 2646 /* make sure that the journal matches the super block */ 2647 if (is_reiserfs_jr(rs) 2648 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != 2649 sb_jp_journal_magic(rs))) { 2650 reiserfs_warning(p_s_sb, 2651 "sh-460: journal header magic %x " 2652 "(device %s) does not match the magic found in the super " 2653 "block %x", jh->jh_journal.jp_journal_magic, 2654 bdevname(journal->j_dev_bd, b), 2655 sb_jp_journal_magic(rs)); 2656 brelse(bhjh); 2657 goto free_and_return; 2658 } 2659 2660 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max); 2661 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch); 2662 journal->j_max_commit_age = 2663 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); 2664 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; 2665 2666 if (journal->j_trans_max) { 2667 /* make sure these parameters are sane, and adjust them if they are not */ 2668 __u32 initial = journal->j_trans_max; 2669 __u32 ratio = 1; 2670 2671 if (p_s_sb->s_blocksize < 4096) 2672 ratio = 4096 / p_s_sb->s_blocksize; 2673 2674 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max < 2675 JOURNAL_MIN_RATIO) 2676 journal->j_trans_max = 2677 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO; 2678 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio) 2679 journal->j_trans_max = 2680 JOURNAL_TRANS_MAX_DEFAULT / ratio; 2681 if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio) 2682 journal->j_trans_max = 2683 JOURNAL_TRANS_MIN_DEFAULT / ratio; 2684 2685 if (journal->j_trans_max != initial) 2686 reiserfs_warning(p_s_sb, 2687 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u", 2688 initial, journal->j_trans_max); 2689 2690 journal->j_max_batch = journal->j_trans_max * 2691 JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT; 2692 } 2693 2694 if (!journal->j_trans_max) { 2695 /* the file system was created by an old version of mkreiserfs, 2696 so this field contains zero */ 2697 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT; 2698 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT; 2699 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE; 2700 2701 /* for blocksize >= 4096 - max transaction size is 1024. 
For block size < 4096 2702 trans max size is decreased proportionally */ 2703 if (p_s_sb->s_blocksize < 4096) { 2704 journal->j_trans_max /= (4096 / p_s_sb->s_blocksize); 2705 journal->j_max_batch = (journal->j_trans_max) * 9 / 10; 2706 } 2707 } 2708 2709 journal->j_default_max_commit_age = journal->j_max_commit_age; 2710 2711 if (commit_max_age != 0) { 2712 journal->j_max_commit_age = commit_max_age; 2713 journal->j_max_trans_age = commit_max_age; 2714 } 2715 2716 reiserfs_info(p_s_sb, "journal params: device %s, size %u, " 2717 "journal first block %u, max trans len %u, max batch %u, " 2718 "max commit age %u, max trans age %u\n", 2719 bdevname(journal->j_dev_bd, b), 2720 SB_ONDISK_JOURNAL_SIZE(p_s_sb), 2721 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 2722 journal->j_trans_max, 2723 journal->j_max_batch, 2724 journal->j_max_commit_age, journal->j_max_trans_age); 2725 2726 brelse(bhjh); 2727 2728 journal->j_list_bitmap_index = 0; 2729 journal_list_init(p_s_sb); 2730 2731 memset(journal->j_list_hash_table, 0, 2732 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); 2733 2734 INIT_LIST_HEAD(&journal->j_dirty_buffers); 2735 spin_lock_init(&journal->j_dirty_buffers_lock); 2736 2737 journal->j_start = 0; 2738 journal->j_len = 0; 2739 journal->j_len_alloc = 0; 2740 atomic_set(&(journal->j_wcount), 0); 2741 atomic_set(&(journal->j_async_throttle), 0); 2742 journal->j_bcount = 0; 2743 journal->j_trans_start_time = 0; 2744 journal->j_last = NULL; 2745 journal->j_first = NULL; 2746 init_waitqueue_head(&(journal->j_join_wait)); 2747 sema_init(&journal->j_lock, 1); 2748 sema_init(&journal->j_flush_sem, 1); 2749 2750 journal->j_trans_id = 10; 2751 journal->j_mount_id = 10; 2752 journal->j_state = 0; 2753 atomic_set(&(journal->j_jlock), 0); 2754 journal->j_cnode_free_list = allocate_cnodes(num_cnodes); 2755 journal->j_cnode_free_orig = journal->j_cnode_free_list; 2756 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; 2757 journal->j_cnode_used = 0; 2758 journal->j_must_wait = 0; 2759 2760 init_journal_hash(p_s_sb); 2761 jl = journal->j_current_jl; 2762 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl); 2763 if (!jl->j_list_bitmap) { 2764 reiserfs_warning(p_s_sb, 2765 "journal-2005, get_list_bitmap failed for journal list 0"); 2766 goto free_and_return; 2767 } 2768 if (journal_read(p_s_sb) < 0) { 2769 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount"); 2770 goto free_and_return; 2771 } 2772 2773 reiserfs_mounted_fs_count++; 2774 if (reiserfs_mounted_fs_count <= 1) 2775 commit_wq = create_workqueue("reiserfs"); 2776 2777 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); 2778 return 0; 2779 free_and_return: 2780 free_journal_ram(p_s_sb); 2781 return 1; 2782} 2783 2784/* 2785** test for a polite end of the current transaction. 
Used by file_write, and should 2786** be used by delete to make sure they don't write more than can fit inside a single 2787** transaction 2788*/ 2789int journal_transaction_should_end(struct reiserfs_transaction_handle *th, 2790 int new_alloc) 2791{ 2792 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); 2793 time_t now = get_seconds(); 2794 /* cannot restart while nested */ 2795 BUG_ON(!th->t_trans_id); 2796 if (th->t_refcount > 1) 2797 return 0; 2798 if (journal->j_must_wait > 0 || 2799 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch || 2800 atomic_read(&(journal->j_jlock)) || 2801 (now - journal->j_trans_start_time) > journal->j_max_trans_age || 2802 journal->j_cnode_free < (journal->j_trans_max * 3)) { 2803 return 1; 2804 } 2805 return 0; 2806} 2807 2808/* this must be called inside a transaction, and requires the 2809** kernel_lock to be held 2810*/ 2811void reiserfs_block_writes(struct reiserfs_transaction_handle *th) 2812{ 2813 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); 2814 BUG_ON(!th->t_trans_id); 2815 journal->j_must_wait = 1; 2816 set_bit(J_WRITERS_BLOCKED, &journal->j_state); 2817 return; 2818} 2819 2820/* this must be called without a transaction started, and does not 2821** require BKL 2822*/ 2823void reiserfs_allow_writes(struct super_block *s) 2824{ 2825 struct reiserfs_journal *journal = SB_JOURNAL(s); 2826 clear_bit(J_WRITERS_BLOCKED, &journal->j_state); 2827 wake_up(&journal->j_join_wait); 2828} 2829 2830/* this must be called without a transaction started, and does not 2831** require BKL 2832*/ 2833void reiserfs_wait_on_write_block(struct super_block *s) 2834{ 2835 struct reiserfs_journal *journal = SB_JOURNAL(s); 2836 wait_event(journal->j_join_wait, 2837 !test_bit(J_WRITERS_BLOCKED, &journal->j_state)); 2838} 2839 2840static void queue_log_writer(struct super_block *s) 2841{ 2842 wait_queue_t wait; 2843 struct reiserfs_journal *journal = SB_JOURNAL(s); 2844 set_bit(J_WRITERS_QUEUED, &journal->j_state); 2845 2846 /* 2847 * we don't want to use wait_event here because 2848 * we only want to wait once. 2849 */ 2850 init_waitqueue_entry(&wait, current); 2851 add_wait_queue(&journal->j_join_wait, &wait); 2852 set_current_state(TASK_UNINTERRUPTIBLE); 2853 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) 2854 schedule(); 2855 current->state = TASK_RUNNING; 2856 remove_wait_queue(&journal->j_join_wait, &wait); 2857} 2858 2859static void wake_queued_writers(struct super_block *s) 2860{ 2861 struct reiserfs_journal *journal = SB_JOURNAL(s); 2862 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state)) 2863 wake_up(&journal->j_join_wait); 2864} 2865 2866static void let_transaction_grow(struct super_block *sb, unsigned long trans_id) 2867{ 2868 struct reiserfs_journal *journal = SB_JOURNAL(sb); 2869 unsigned long bcount = journal->j_bcount; 2870 while (1) { 2871 schedule_timeout_uninterruptible(1); 2872 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; 2873 while ((atomic_read(&journal->j_wcount) > 0 || 2874 atomic_read(&journal->j_jlock)) && 2875 journal->j_trans_id == trans_id) { 2876 queue_log_writer(sb); 2877 } 2878 if (journal->j_trans_id != trans_id) 2879 break; 2880 if (bcount == journal->j_bcount) 2881 break; 2882 bcount = journal->j_bcount; 2883 } 2884} 2885 2886/* join == true if you must join an existing transaction. 2887** join == false if you can deal with waiting for others to finish 2888** 2889** this will block until the transaction is joinable. send the number of blocks you 2890** expect to use in nblocks. 
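**
** a sketch of a typical caller, for illustration only (names like
** blocks_needed are made up, this is not quoted from a real caller):
**
**	struct reiserfs_transaction_handle th;
**	int err = journal_begin(&th, sb, blocks_needed);
**	if (err)
**		return err;
**	reiserfs_prepare_for_journal(sb, bh, 1);
**	journal_mark_dirty(&th, sb, bh);
**	err = journal_end(&th, sb, blocks_needed);
**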
2891*/ 2892static int do_journal_begin_r(struct reiserfs_transaction_handle *th, 2893 struct super_block *p_s_sb, unsigned long nblocks, 2894 int join) 2895{ 2896 time_t now = get_seconds(); 2897 int old_trans_id; 2898 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 2899 struct reiserfs_transaction_handle myth; 2900 int sched_count = 0; 2901 int retval; 2902 2903 reiserfs_check_lock_depth(p_s_sb, "journal_begin"); 2904 if (nblocks > journal->j_trans_max) 2905 BUG(); 2906 2907 PROC_INFO_INC(p_s_sb, journal.journal_being); 2908 /* set here for journal_join */ 2909 th->t_refcount = 1; 2910 th->t_super = p_s_sb; 2911 2912 relock: 2913 lock_journal(p_s_sb); 2914 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) { 2915 unlock_journal(p_s_sb); 2916 retval = journal->j_errno; 2917 goto out_fail; 2918 } 2919 journal->j_bcount++; 2920 2921 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) { 2922 unlock_journal(p_s_sb); 2923 reiserfs_wait_on_write_block(p_s_sb); 2924 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers); 2925 goto relock; 2926 } 2927 now = get_seconds(); 2928 2929 /* if there is no room in the journal OR 2930 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning 2931 ** we don't sleep if there aren't other writers 2932 */ 2933 2934 if ((!join && journal->j_must_wait > 0) || 2935 (!join 2936 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) 2937 || (!join && atomic_read(&journal->j_wcount) > 0 2938 && journal->j_trans_start_time > 0 2939 && (now - journal->j_trans_start_time) > 2940 journal->j_max_trans_age) || (!join 2941 && atomic_read(&journal->j_jlock)) 2942 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) { 2943 2944 old_trans_id = journal->j_trans_id; 2945 unlock_journal(p_s_sb); /* allow others to finish this transaction */ 2946 2947 if (!join && (journal->j_len_alloc + nblocks + 2) >= 2948 journal->j_max_batch && 2949 ((journal->j_len + nblocks + 2) * 100) < 2950 (journal->j_len_alloc * 75)) { 2951 if (atomic_read(&journal->j_wcount) > 10) { 2952 sched_count++; 2953 queue_log_writer(p_s_sb); 2954 goto relock; 2955 } 2956 } 2957 /* don't mess with joining the transaction if all we have to do is 2958 * wait for someone else to do a commit 2959 */ 2960 if (atomic_read(&journal->j_jlock)) { 2961 while (journal->j_trans_id == old_trans_id && 2962 atomic_read(&journal->j_jlock)) { 2963 queue_log_writer(p_s_sb); 2964 } 2965 goto relock; 2966 } 2967 retval = journal_join(&myth, p_s_sb, 1); 2968 if (retval) 2969 goto out_fail; 2970 2971 /* someone might have ended the transaction while we joined */ 2972 if (old_trans_id != journal->j_trans_id) { 2973 retval = do_journal_end(&myth, p_s_sb, 1, 0); 2974 } else { 2975 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW); 2976 } 2977 2978 if (retval) 2979 goto out_fail; 2980 2981 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount); 2982 goto relock; 2983 } 2984 /* we are the first writer, set trans_id */ 2985 if (journal->j_trans_start_time == 0) { 2986 journal->j_trans_start_time = get_seconds(); 2987 } 2988 atomic_inc(&(journal->j_wcount)); 2989 journal->j_len_alloc += nblocks; 2990 th->t_blocks_logged = 0; 2991 th->t_blocks_allocated = nblocks; 2992 th->t_trans_id = journal->j_trans_id; 2993 unlock_journal(p_s_sb); 2994 INIT_LIST_HEAD(&th->t_list); 2995 get_fs_excl(); 2996 return 0; 2997 2998 out_fail: 2999 memset(th, 0, sizeof(*th)); 3000 /* Re-set th->t_super, so we can properly keep track of how many 3001 * persistent 
transactions there are. We need to do this so if this 3002 * call is part of a failed restart_transaction, we can free it later */ 3003 th->t_super = p_s_sb; 3004 return retval; 3005} 3006 3007struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct 3008 super_block 3009 *s, 3010 int nblocks) 3011{ 3012 int ret; 3013 struct reiserfs_transaction_handle *th; 3014 3015 /* if we're nesting into an existing transaction. It will be 3016 ** persistent on its own 3017 */ 3018 if (reiserfs_transaction_running(s)) { 3019 th = current->journal_info; 3020 th->t_refcount++; 3021 if (th->t_refcount < 2) { 3022 BUG(); 3023 } 3024 return th; 3025 } 3026 th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), 3027 GFP_NOFS, s); 3028 if (!th) 3029 return NULL; 3030 ret = journal_begin(th, s, nblocks); 3031 if (ret) { 3032 reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), 3033 s); 3034 return NULL; 3035 } 3036 3037 SB_JOURNAL(s)->j_persistent_trans++; 3038 return th; 3039} 3040 3041int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) 3042{ 3043 struct super_block *s = th->t_super; 3044 int ret = 0; 3045 if (th->t_trans_id) 3046 ret = journal_end(th, th->t_super, th->t_blocks_allocated); 3047 else 3048 ret = -EIO; 3049 if (th->t_refcount == 0) { 3050 SB_JOURNAL(s)->j_persistent_trans--; 3051 reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), 3052 s); 3053 } 3054 return ret; 3055} 3056 3057static int journal_join(struct reiserfs_transaction_handle *th, 3058 struct super_block *p_s_sb, unsigned long nblocks) 3059{ 3060 struct reiserfs_transaction_handle *cur_th = current->journal_info; 3061 3062 /* this keeps do_journal_end from NULLing out the current->journal_info 3063 ** pointer 3064 */ 3065 th->t_handle_save = cur_th; 3066 if (cur_th && cur_th->t_refcount > 1) { 3067 BUG(); 3068 } 3069 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN); 3070} 3071 3072int journal_join_abort(struct reiserfs_transaction_handle *th, 3073 struct super_block *p_s_sb, unsigned long nblocks) 3074{ 3075 struct reiserfs_transaction_handle *cur_th = current->journal_info; 3076 3077 /* this keeps do_journal_end from NULLing out the current->journal_info 3078 ** pointer 3079 */ 3080 th->t_handle_save = cur_th; 3081 if (cur_th && cur_th->t_refcount > 1) { 3082 BUG(); 3083 } 3084 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT); 3085} 3086 3087int journal_begin(struct reiserfs_transaction_handle *th, 3088 struct super_block *p_s_sb, unsigned long nblocks) 3089{ 3090 struct reiserfs_transaction_handle *cur_th = current->journal_info; 3091 int ret; 3092 3093 th->t_handle_save = NULL; 3094 if (cur_th) { 3095 /* we are nesting into the current transaction */ 3096 if (cur_th->t_super == p_s_sb) { 3097 BUG_ON(!cur_th->t_refcount); 3098 cur_th->t_refcount++; 3099 memcpy(th, cur_th, sizeof(*th)); 3100 if (th->t_refcount <= 1) 3101 reiserfs_warning(p_s_sb, 3102 "BAD: refcount <= 1, but journal_info != 0"); 3103 return 0; 3104 } else { 3105 /* we've ended up with a handle from a different filesystem. 3106 ** save it and restore on journal_end. This should never 3107 ** really happen... 
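**
** (the normal same-filesystem nesting above is simpler, sketched here
** for contrast: the nested journal_begin just bumps t_refcount and
** memcpy()s the outer handle into th, and the matching journal_end
** does no real work until t_refcount falls back to zero)
**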
3108 */ 3109 reiserfs_warning(p_s_sb, 3110 "clm-2100: nesting into a different FS"); 3111 th->t_handle_save = current->journal_info; 3112 current->journal_info = th; 3113 } 3114 } else { 3115 current->journal_info = th; 3116 } 3117 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG); 3118 if (current->journal_info != th) 3119 BUG(); 3120 3121 /* I guess this boils down to being the reciprocal of clm-2100 above. 3122 * If do_journal_begin_r fails, we need to put it back, since journal_end 3123 * won't be called to do it. */ 3124 if (ret) 3125 current->journal_info = th->t_handle_save; 3126 else 3127 BUG_ON(!th->t_refcount); 3128 3129 return ret; 3130} 3131 3132/* 3133** puts bh into the current transaction. If it was already there, removes the 3134** old pointers from the hash and puts new ones in (to make sure replay happens in the right order). 3135** 3136** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the 3137** transaction is committed. 3138** 3139** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len. 3140*/ 3141int journal_mark_dirty(struct reiserfs_transaction_handle *th, 3142 struct super_block *p_s_sb, struct buffer_head *bh) 3143{ 3144 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3145 struct reiserfs_journal_cnode *cn = NULL; 3146 int count_already_incd = 0; 3147 int prepared = 0; 3148 BUG_ON(!th->t_trans_id); 3149 3150 PROC_INFO_INC(p_s_sb, journal.mark_dirty); 3151 if (th->t_trans_id != journal->j_trans_id) { 3152 reiserfs_panic(th->t_super, 3153 "journal-1577: handle trans id %ld != current trans id %ld\n", 3154 th->t_trans_id, journal->j_trans_id); 3155 } 3156 3157 p_s_sb->s_dirt = 1; 3158 3159 prepared = test_clear_buffer_journal_prepared(bh); 3160 clear_buffer_journal_restore_dirty(bh); 3161 /* already in this transaction, we are done */ 3162 if (buffer_journaled(bh)) { 3163 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already); 3164 return 0; 3165 } 3166 3167 /* this must be turned into a panic instead of a warning. We can't allow 3168 ** a dirty or journal_dirty or locked buffer to be logged, as some changes 3169 ** could get to disk too early. NOT GOOD. 3170 */ 3171 if (!prepared || buffer_dirty(bh)) { 3172 reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state " 3173 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT", 3174 (unsigned long long)bh->b_blocknr, 3175 prepared ? ' ' : '!', 3176 buffer_locked(bh) ? ' ' : '!', 3177 buffer_dirty(bh) ? ' ' : '!', 3178 buffer_journal_dirty(bh) ? ' ' : '!'); 3179 } 3180 3181 if (atomic_read(&(journal->j_wcount)) <= 0) { 3182 reiserfs_warning(p_s_sb, 3183 "journal-1409: journal_mark_dirty returning because j_wcount was %d", 3184 atomic_read(&(journal->j_wcount))); 3185 return 1; 3186 } 3187 /* this error means I've screwed up, and we've overflowed the transaction. 3188 ** Nothing can be done here, except make the FS readonly or panic. 
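**
** rough numbers for illustration (not from the original): with a
** j_trans_max of 1024, a caller that under-reserved in journal_begin
** and kept logging could push j_len to 1024 and land here; the nblocks
** reservation exists precisely so this panic is never reached.
**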
3189 */ 3190 if (journal->j_len >= journal->j_trans_max) { 3191 reiserfs_panic(th->t_super, 3192 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", 3193 journal->j_len); 3194 } 3195 3196 if (buffer_journal_dirty(bh)) { 3197 count_already_incd = 1; 3198 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal); 3199 clear_buffer_journal_dirty(bh); 3200 } 3201 3202 if (journal->j_len > journal->j_len_alloc) { 3203 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT; 3204 } 3205 3206 set_buffer_journaled(bh); 3207 3208 /* now put this guy on the end */ 3209 if (!cn) { 3210 cn = get_cnode(p_s_sb); 3211 if (!cn) { 3212 reiserfs_panic(p_s_sb, "get_cnode failed!\n"); 3213 } 3214 3215 if (th->t_blocks_logged == th->t_blocks_allocated) { 3216 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT; 3217 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT; 3218 } 3219 th->t_blocks_logged++; 3220 journal->j_len++; 3221 3222 cn->bh = bh; 3223 cn->blocknr = bh->b_blocknr; 3224 cn->sb = p_s_sb; 3225 cn->jlist = NULL; 3226 insert_journal_hash(journal->j_hash_table, cn); 3227 if (!count_already_incd) { 3228 get_bh(bh); 3229 } 3230 } 3231 cn->next = NULL; 3232 cn->prev = journal->j_last; 3233 cn->bh = bh; 3234 if (journal->j_last) { 3235 journal->j_last->next = cn; 3236 journal->j_last = cn; 3237 } else { 3238 journal->j_first = cn; 3239 journal->j_last = cn; 3240 } 3241 return 0; 3242} 3243 3244int journal_end(struct reiserfs_transaction_handle *th, 3245 struct super_block *p_s_sb, unsigned long nblocks) 3246{ 3247 if (!current->journal_info && th->t_refcount > 1) 3248 reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d", 3249 th->t_refcount); 3250 3251 if (!th->t_trans_id) { 3252 WARN_ON(1); 3253 return -EIO; 3254 } 3255 3256 th->t_refcount--; 3257 if (th->t_refcount > 0) { 3258 struct reiserfs_transaction_handle *cur_th = 3259 current->journal_info; 3260 3261 /* we aren't allowed to close a nested transaction on a different 3262 ** filesystem from the one in the task struct 3263 */ 3264 if (cur_th->t_super != th->t_super) 3265 BUG(); 3266 3267 if (th != cur_th) { 3268 memcpy(current->journal_info, th, sizeof(*th)); 3269 th->t_trans_id = 0; 3270 } 3271 return 0; 3272 } else { 3273 return do_journal_end(th, p_s_sb, nblocks, 0); 3274 } 3275} 3276 3277/* removes from the current transaction, releasing and decrementing any counters. 3278** also files the removed buffer directly onto the clean list 3279** 3280** called by journal_mark_freed when a block has been deleted 3281** 3282** returns 1 if it cleaned and released the buffer. 
0 otherwise 3283 */ 3284static int remove_from_transaction(struct super_block *p_s_sb, 3285 b_blocknr_t blocknr, int already_cleaned) 3286{ 3287 struct buffer_head *bh; 3288 struct reiserfs_journal_cnode *cn; 3289 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3290 int ret = 0; 3291 3292 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr); 3293 if (!cn || !cn->bh) { 3294 return ret; 3295 } 3296 bh = cn->bh; 3297 if (cn->prev) { 3298 cn->prev->next = cn->next; 3299 } 3300 if (cn->next) { 3301 cn->next->prev = cn->prev; 3302 } 3303 if (cn == journal->j_first) { 3304 journal->j_first = cn->next; 3305 } 3306 if (cn == journal->j_last) { 3307 journal->j_last = cn->prev; 3308 } 3309 if (bh) 3310 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, 3311 bh->b_blocknr, 0); 3312 clear_buffer_journaled(bh); /* don't log this one */ 3313 3314 if (!already_cleaned) { 3315 clear_buffer_journal_dirty(bh); 3316 clear_buffer_dirty(bh); 3317 clear_buffer_journal_test(bh); 3318 put_bh(bh); 3319 if (atomic_read(&(bh->b_count)) < 0) { 3320 reiserfs_warning(p_s_sb, 3321 "journal-1752: remove from trans, b_count < 0"); 3322 } 3323 ret = 1; 3324 } 3325 journal->j_len--; 3326 journal->j_len_alloc--; 3327 free_cnode(p_s_sb, cn); 3328 return ret; 3329} 3330 3331/* 3332** for any cnode in a journal list, it can only be dirtied if all the 3333** transactions that include it are committed to disk. 3334** this checks through each transaction, and returns 1 if you are allowed to dirty, 3335** and 0 if you aren't 3336** 3337** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log 3338** blocks for a given transaction on disk 3339** 3340*/ 3341static int can_dirty(struct reiserfs_journal_cnode *cn) 3342{ 3343 struct super_block *sb = cn->sb; 3344 b_blocknr_t blocknr = cn->blocknr; 3345 struct reiserfs_journal_cnode *cur = cn->hprev; 3346 int can_dirty = 1; 3347 3348 /* first test hprev. These are all newer than cn, so any node here 3349 ** with the same block number and dev means this node can't be sent 3350 ** to disk right now. 3351 */ 3352 while (cur && can_dirty) { 3353 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb && 3354 cur->blocknr == blocknr) { 3355 can_dirty = 0; 3356 } 3357 cur = cur->hprev; 3358 } 3359 /* then test hnext. These are all older than cn. 
As long as they 3360 ** are committed to the log, it is safe to write cn to disk 3361 */ 3362 cur = cn->hnext; 3363 while (cur && can_dirty) { 3364 if (cur->jlist && cur->jlist->j_len > 0 && 3365 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh && 3366 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) { 3367 can_dirty = 0; 3368 } 3369 cur = cur->hnext; 3370 } 3371 return can_dirty; 3372} 3373 3374/* syncs the commit blocks, but does not force the real buffers to disk 3375** will wait until the current transaction is done/committed before returning 3376*/ 3377int journal_end_sync(struct reiserfs_transaction_handle *th, 3378 struct super_block *p_s_sb, unsigned long nblocks) 3379{ 3380 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3381 3382 BUG_ON(!th->t_trans_id); 3383 /* you can sync while nested, very, very bad */ 3384 if (th->t_refcount > 1) { 3385 BUG(); 3386 } 3387 if (journal->j_len == 0) { 3388 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 3389 1); 3390 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)); 3391 } 3392 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT); 3393} 3394 3395/* 3396** writeback the pending async commits to disk 3397*/ 3398static void flush_async_commits(void *p) 3399{ 3400 struct super_block *p_s_sb = p; 3401 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3402 struct reiserfs_journal_list *jl; 3403 struct list_head *entry; 3404 3405 lock_kernel(); 3406 if (!list_empty(&journal->j_journal_list)) { 3407 /* last entry is the youngest, commit it and you get everything */ 3408 entry = journal->j_journal_list.prev; 3409 jl = JOURNAL_LIST_ENTRY(entry); 3410 flush_commit_list(p_s_sb, jl, 1); 3411 } 3412 unlock_kernel(); 3413 /* 3414 * this is a little racy, but there's no harm in missing 3415 * the filemap_fdatawrite 3416 */ 3417 if (!atomic_read(&journal->j_async_throttle) 3418 && !reiserfs_is_journal_aborted(journal)) { 3419 atomic_inc(&journal->j_async_throttle); 3420 filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping); 3421 atomic_dec(&journal->j_async_throttle); 3422 } 3423} 3424 3425/* 3426** flushes any old transactions to disk 3427** ends the current transaction if it is too old 3428*/ 3429int reiserfs_flush_old_commits(struct super_block *p_s_sb) 3430{ 3431 time_t now; 3432 struct reiserfs_transaction_handle th; 3433 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3434 3435 now = get_seconds(); 3436 /* safety check so we don't flush while we are replaying the log during 3437 * mount 3438 */ 3439 if (list_empty(&journal->j_journal_list)) { 3440 return 0; 3441 } 3442 3443 /* check the current transaction. 
If there are no writers, and it is 3444 * too old, finish it, and force the commit blocks to disk 3445 */ 3446 if (atomic_read(&journal->j_wcount) <= 0 && 3447 journal->j_trans_start_time > 0 && 3448 journal->j_len > 0 && 3449 (now - journal->j_trans_start_time) > journal->j_max_trans_age) { 3450 if (!journal_join(&th, p_s_sb, 1)) { 3451 reiserfs_prepare_for_journal(p_s_sb, 3452 SB_BUFFER_WITH_SB(p_s_sb), 3453 1); 3454 journal_mark_dirty(&th, p_s_sb, 3455 SB_BUFFER_WITH_SB(p_s_sb)); 3456 3457 /* we're only being called from kreiserfsd, it makes no sense to do 3458 ** an async commit so that kreiserfsd can do it later 3459 */ 3460 do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT); 3461 } 3462 } 3463 return p_s_sb->s_dirt; 3464} 3465 3466/* 3467** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit 3468** 3469** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all 3470** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just 3471** flushes the commit list and returns 0. 3472** 3473** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait. 3474** 3475** Note, we can't allow the journal_end to proceed while there are still writers in the log. 3476*/ 3477static int check_journal_end(struct reiserfs_transaction_handle *th, 3478 struct super_block *p_s_sb, unsigned long nblocks, 3479 int flags) 3480{ 3481 3482 time_t now; 3483 int flush = flags & FLUSH_ALL; 3484 int commit_now = flags & COMMIT_NOW; 3485 int wait_on_commit = flags & WAIT; 3486 struct reiserfs_journal_list *jl; 3487 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3488 3489 BUG_ON(!th->t_trans_id); 3490 3491 if (th->t_trans_id != journal->j_trans_id) { 3492 reiserfs_panic(th->t_super, 3493 "journal-1577: handle trans id %ld != current trans id %ld\n", 3494 th->t_trans_id, journal->j_trans_id); 3495 } 3496 3497 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged); 3498 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */ 3499 atomic_dec(&(journal->j_wcount)); 3500 } 3501 3502 /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released 3503 ** will be dealt with by next transaction that actually writes something, but should be taken 3504 ** care of in this trans 3505 */ 3506 if (journal->j_len == 0) { 3507 BUG(); 3508 } 3509 /* if wcount > 0, and we are called with flush or commit_now, 3510 ** we wait on j_join_wait. We will wake up when the last writer has 3511 ** finished the transaction, and started it on its way to the disk. 3512 ** Then, we flush the commit or journal list, and just return 0 3513 ** because the rest of journal end was already done for this transaction. 
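**
** illustrative walk-through (not from the original): say j_wcount is 2
** when a COMMIT_NOW caller gets here. It sets j_jlock, sleeps on
** j_join_wait until j_trans_id moves past its transaction, then flushes
** the commit list itself and returns 0, because the last writer out
** already did the real journal_end work.
**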
3514 */ 3515 if (atomic_read(&(journal->j_wcount)) > 0) { 3516 if (flush || commit_now) { 3517 unsigned trans_id; 3518 3519 jl = journal->j_current_jl; 3520 trans_id = jl->j_trans_id; 3521 if (wait_on_commit) 3522 jl->j_state |= LIST_COMMIT_PENDING; 3523 atomic_set(&(journal->j_jlock), 1); 3524 if (flush) { 3525 journal->j_next_full_flush = 1; 3526 } 3527 unlock_journal(p_s_sb); 3528 3529 /* sleep while the current transaction is still j_jlocked */ 3530 while (journal->j_trans_id == trans_id) { 3531 if (atomic_read(&journal->j_jlock)) { 3532 queue_log_writer(p_s_sb); 3533 } else { 3534 lock_journal(p_s_sb); 3535 if (journal->j_trans_id == trans_id) { 3536 atomic_set(&(journal->j_jlock), 3537 1); 3538 } 3539 unlock_journal(p_s_sb); 3540 } 3541 } 3542 if (journal->j_trans_id == trans_id) { 3543 BUG(); 3544 } 3545 if (commit_now 3546 && journal_list_still_alive(p_s_sb, trans_id) 3547 && wait_on_commit) { 3548 flush_commit_list(p_s_sb, jl, 1); 3549 } 3550 return 0; 3551 } 3552 unlock_journal(p_s_sb); 3553 return 0; 3554 } 3555 3556 /* deal with old transactions where we are the last writers */ 3557 now = get_seconds(); 3558 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) { 3559 commit_now = 1; 3560 journal->j_next_async_flush = 1; 3561 } 3562 /* don't batch when someone is waiting on j_join_wait */ 3563 /* don't batch when syncing the commit or flushing the whole trans */ 3564 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) 3565 && !flush && !commit_now && (journal->j_len < journal->j_max_batch) 3566 && journal->j_len_alloc < journal->j_max_batch 3567 && journal->j_cnode_free > (journal->j_trans_max * 3)) { 3568 journal->j_bcount++; 3569 unlock_journal(p_s_sb); 3570 return 0; 3571 } 3572 3573 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) { 3574 reiserfs_panic(p_s_sb, 3575 "journal-003: journal_end: j_start (%ld) is too high\n", 3576 journal->j_start); 3577 } 3578 return 1; 3579} 3580 3581/* 3582** Does all the work that makes deleting blocks safe. 3583** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on. 3584** 3585** otherwise: 3586** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes 3587** before this transaction has finished. 3588** 3589** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with 3590** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash, 3591** the block can't be reallocated yet. 3592** 3593** Then remove it from the current transaction, decrementing any counters and filing it on the clean list. 
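**
** in short (an illustrative restatement, not new behavior): a BH_JNew
** block was never on disk, so it can simply be forgotten; anything else
** stays pinned in the list bitmap with its older cnodes marked
** BLOCK_FREED, so no stale copy can be flushed over the freed location
** and the block can't be handed out again too early.
**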
3594*/ 3595int journal_mark_freed(struct reiserfs_transaction_handle *th, 3596 struct super_block *p_s_sb, b_blocknr_t blocknr) 3597{ 3598 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3599 struct reiserfs_journal_cnode *cn = NULL; 3600 struct buffer_head *bh = NULL; 3601 struct reiserfs_list_bitmap *jb = NULL; 3602 int cleaned = 0; 3603 BUG_ON(!th->t_trans_id); 3604 3605 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr); 3606 if (cn && cn->bh) { 3607 bh = cn->bh; 3608 get_bh(bh); 3609 } 3610 /* if it is journal new, we just remove it from this transaction */ 3611 if (bh && buffer_journal_new(bh)) { 3612 clear_buffer_journal_new(bh); 3613 clear_prepared_bits(bh); 3614 reiserfs_clean_and_file_buffer(bh); 3615 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned); 3616 } else { 3617 /* set the bit for this block in the journal bitmap for this transaction */ 3618 jb = journal->j_current_jl->j_list_bitmap; 3619 if (!jb) { 3620 reiserfs_panic(p_s_sb, 3621 "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n"); 3622 } 3623 set_bit_in_list_bitmap(p_s_sb, blocknr, jb); 3624 3625 /* Note, the entire while loop is not allowed to schedule. */ 3626 3627 if (bh) { 3628 clear_prepared_bits(bh); 3629 reiserfs_clean_and_file_buffer(bh); 3630 } 3631 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned); 3632 3633 /* find all older transactions with this block, make sure they don't try to write it out */ 3634 cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, 3635 blocknr); 3636 while (cn) { 3637 if (p_s_sb == cn->sb && blocknr == cn->blocknr) { 3638 set_bit(BLOCK_FREED, &cn->state); 3639 if (cn->bh) { 3640 if (!cleaned) { 3641 /* remove_from_transaction will brelse the buffer if it was 3642 ** in the current trans 3643 */ 3644 clear_buffer_journal_dirty(cn-> 3645 bh); 3646 clear_buffer_dirty(cn->bh); 3647 clear_buffer_journal_test(cn-> 3648 bh); 3649 cleaned = 1; 3650 put_bh(cn->bh); 3651 if (atomic_read 3652 (&(cn->bh->b_count)) < 0) { 3653 reiserfs_warning(p_s_sb, 3654 "journal-2138: cn->bh->b_count < 0"); 3655 } 3656 } 3657 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */ 3658 atomic_dec(& 3659 (cn->jlist-> 3660 j_nonzerolen)); 3661 } 3662 cn->bh = NULL; 3663 } 3664 } 3665 cn = cn->hnext; 3666 } 3667 } 3668 3669 if (bh) { 3670 put_bh(bh); /* get_hash grabs the buffer */ 3671 if (atomic_read(&(bh->b_count)) < 0) { 3672 reiserfs_warning(p_s_sb, 3673 "journal-2165: bh->b_count < 0"); 3674 } 3675 } 3676 return 0; 3677} 3678 3679void reiserfs_update_inode_transaction(struct inode *inode) 3680{ 3681 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb); 3682 REISERFS_I(inode)->i_jl = journal->j_current_jl; 3683 REISERFS_I(inode)->i_trans_id = journal->j_trans_id; 3684} 3685 3686/* 3687 * returns -1 on error, 0 if no commits/barriers were done and 1 3688 * if a transaction was actually committed and the barrier was done 3689 */ 3690static int __commit_trans_jl(struct inode *inode, unsigned long id, 3691 struct reiserfs_journal_list *jl) 3692{ 3693 struct reiserfs_transaction_handle th; 3694 struct super_block *sb = inode->i_sb; 3695 struct reiserfs_journal *journal = SB_JOURNAL(sb); 3696 int ret = 0; 3697 3698 /* is it from the current transaction, or from an unknown transaction? 
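**
** illustrative call path (not from the original): an fsync-style caller
** arrives via reiserfs_commit_for_inode() below, which passes the
** inode's saved i_trans_id and i_jl; a return of 1 means a commit and
** its barrier were actually performed here, not merely found done.
**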
*/ 3699 if (id == journal->j_trans_id) { 3700 jl = journal->j_current_jl; 3701 /* try to let other writers come in and grow this transaction */ 3702 let_transaction_grow(sb, id); 3703 if (journal->j_trans_id != id) { 3704 goto flush_commit_only; 3705 } 3706 3707 ret = journal_begin(&th, sb, 1); 3708 if (ret) 3709 return ret; 3710 3711 /* someone might have ended this transaction while we joined */ 3712 if (journal->j_trans_id != id) { 3713 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 3714 1); 3715 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)); 3716 ret = journal_end(&th, sb, 1); 3717 goto flush_commit_only; 3718 } 3719 3720 ret = journal_end_sync(&th, sb, 1); 3721 if (!ret) 3722 ret = 1; 3723 3724 } else { 3725 /* this gets tricky, we have to make sure the journal list in 3726 * the inode still exists. We know the list is still around 3727 * if we've got a larger transaction id than the oldest list 3728 */ 3729 flush_commit_only: 3730 if (journal_list_still_alive(inode->i_sb, id)) { 3731 /* 3732 * we only set ret to 1 when we know for sure 3733 * the barrier hasn't been started yet on the commit 3734 * block. 3735 */ 3736 if (atomic_read(&jl->j_commit_left) > 1) 3737 ret = 1; 3738 flush_commit_list(sb, jl, 1); 3739 if (journal->j_errno) 3740 ret = journal->j_errno; 3741 } 3742 } 3743 /* otherwise the list is gone, and long since committed */ 3744 return ret; 3745} 3746 3747int reiserfs_commit_for_inode(struct inode *inode) 3748{ 3749 unsigned long id = REISERFS_I(inode)->i_trans_id; 3750 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl; 3751 3752 /* for the whole inode, assume unset id means it was 3753 * changed in the current transaction. More conservative 3754 */ 3755 if (!id || !jl) { 3756 reiserfs_update_inode_transaction(inode); 3757 id = REISERFS_I(inode)->i_trans_id; 3758 /* jl will be updated in __commit_trans_jl */ 3759 } 3760 3761 return __commit_trans_jl(inode, id, jl); 3762} 3763 3764void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb, 3765 struct buffer_head *bh) 3766{ 3767 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3768 PROC_INFO_INC(p_s_sb, journal.restore_prepared); 3769 if (!bh) { 3770 return; 3771 } 3772 if (test_clear_buffer_journal_restore_dirty(bh) && 3773 buffer_journal_dirty(bh)) { 3774 struct reiserfs_journal_cnode *cn; 3775 cn = get_journal_hash_dev(p_s_sb, 3776 journal->j_list_hash_table, 3777 bh->b_blocknr); 3778 if (cn && can_dirty(cn)) { 3779 set_buffer_journal_test(bh); 3780 mark_buffer_dirty(bh); 3781 } 3782 } 3783 clear_buffer_journal_prepared(bh); 3784} 3785 3786extern struct tree_balance *cur_tb; 3787/* 3788** before we can change a metadata block, we have to make sure it won't 3789** be written to disk while we are altering it. So, we must: 3790** clean it 3791** wait on it. 
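**
** usage sketch (not quoted from a caller): the usual pairing is
**
**	reiserfs_prepare_for_journal(p_s_sb, bh, 1);
**	journal_mark_dirty(th, p_s_sb, bh);
**
** with wait == 0 giving trylock behavior, where a return of 0 means the
** buffer was locked by someone else and nothing was prepared.
**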
extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(p_s_sb, journal.prepare);

	if (test_set_buffer_locked(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}

static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}

/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush = flags & FLUSH_ALL;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned long commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	put_fs_excl();
	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(p_s_sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}

	lock_journal(p_s_sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not
	** return 1; it tells us if we should continue with the journal_end
	** or just return
	*/
	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
		p_s_sb->s_dirt = 1;
		wake_queued_writers(p_s_sb);
		reiserfs_async_progress_wait(p_s_sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}
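	/* Editor's note: what follows builds the on-disk image of this
	** transaction.  The layout, as set up below, is:
	**
	**   desc block | log block 1 .. log block j_len | commit block
	**
	** placed modulo SB_ONDISK_JOURNAL_SIZE starting at j_start.  The
	** desc block holds the first trans_half real block numbers and the
	** commit block holds the rest; with 4k blocks that is 1018 entries
	** each, which is what keeps both structs at exactly 4k (see
	** JOURNAL_TRANS_HALF at the top of this file).
	*/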
	/*
	** j must wait means we have to flush the log blocks, and the real
	** blocks for this transaction
	*/
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them */
	current->journal_info = th;
	reiserfs_discard_all_prealloc(th);	/* this must not add new blocks
						 * to the transaction */
	current->journal_info = th->t_handle_save;
#endif

	/* set up the description block */
	d_bh = journal_getblk(p_s_sb,
			      SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			      journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* set up the commit block.  Don't write it (and keep it clean) until
	** everyone else is written */
	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	down(&jl->j_commit_lock);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;
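	/* Editor's note: j_commit_left is set to j_len + 2 above: one count
	** per logged block, plus one each for the desc and commit blocks
	** (j_start advances by j_len + 2 below for the same reason).
	** flush_commit_list decrements it as writes complete and, as noted
	** further down, only dirties c_bh once commit_left has fallen to 1,
	** i.e. once every other block of the transaction is on disk.
	*/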
	/* The ENTIRE FOR LOOP MUST NOT cause schedule to occur.
	** for each real block, add it to the journal list hash,
	** copy into real block index array in the commit or desc block
	*/
	trans_half = journal_trans_half(p_s_sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(p_s_sb);
			if (!jl_cn) {
				reiserfs_panic(p_s_sb,
					       "journal-1676, get_cnode returned NULL\n");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (p_s_sb, cn->bh->b_blocknr)) {
				reiserfs_panic(p_s_sb,
					       "journal-2332: Trying to log block %lu, which is a log block\n",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = p_s_sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal were marked for not logging */
	if (journal->j_len == 0) {
		BUG();
	}

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;		/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh = journal_getblk(p_s_sb,
						SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
						((cur_write_start +
						  jindex) %
						 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.  don't log this one */
			reiserfs_warning(p_s_sb,
					 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(p_s_sb, cn);
		cn = next;
		cond_resched();
	}

	/* we are done with both the c_bh and d_bh, but
	** c_bh must be written after all other commit blocks,
	** so we dirty/brelse c_bh in flush_commit_list, with commit_left <= 1.
	*/
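	/* Editor's note: from here on the finished list is handed to the
	** commit machinery.  A fresh journal_list becomes j_current_jl so
	** new writers can start the next transaction immediately, while
	** this one is committed either synchronously (FLUSH_ALL) or by the
	** background commit work queue.
	*/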
	journal->j_current_jl = alloc_journal_list(p_s_sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	journal->j_trans_id++;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(p_s_sb);

	/* make sure reiserfs_add_jh sees the new current_jl before we
	 * write out the tails
	 */
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction.  Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed
	 * and clean; if we crash before the later transaction commits, the
	 * data block is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		lock_kernel();
	}
	if (!list_empty(&jl->j_tail_bh_list))
		BUG();
	up(&jl->j_commit_lock);

	/* honor the flush wishes from the caller; simple commits can
	** be done outside the journal lock, they are done below.
	**
	** if we don't flush the commit list right now, we put it into
	** the work queue so the people waiting on the async progress work
	** queue don't wait for this proc to flush journal lists and such.
	*/
	if (flush) {
		flush_commit_list(p_s_sb, jl, 1);
		flush_journal_list(p_s_sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	** transactions that might get overwritten.  If any journal lists
	** are very old, flush them as well.
	*/
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later
				 * transactions; break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log.  We can break.
				 */
				break;
			}
		}
	}
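	/* Editor's note: a worked example of the wrap check above (numbers
	** are illustrative).  With an 8192-block log, j_start == 8000 and
	** j_trans_max == 1024, the next transaction may touch blocks up to
	** 8000 + 1024 + 1 == 9025, which wraps to 9025 % 8192 == 833.  So
	** every list starting at or after 8000, and every wrapped-over list
	** starting at or below 833, is flushed before its log blocks can be
	** overwritten.
	*/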
	flush_old_journal_lists(p_s_sb);

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(p_s_sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(p_s_sb,
			       "journal-1996: do_journal_end, could not get a list bitmap\n");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(p_s_sb);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
		flush_commit_list(p_s_sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(p_s_sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are.  We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = p_s_sb;

	return journal->j_errno;
}

static void __reiserfs_journal_abort_hard(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
	       reiserfs_bdevname(sb));

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}

static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	__reiserfs_journal_abort_hard(sb);
}

void reiserfs_journal_abort(struct super_block *sb, int errno)
{
	return __reiserfs_journal_abort_soft(sb, errno);
}
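/* Editor's note on the abort path above: the soft variant records the
** first errno in j_errno and then calls the hard variant, which forces
** the filesystem read-only and sets J_ABORTED.  Once that bit is set both
** routines become no-ops, and (per the comment on do_journal_end) later
** journal ends just clean up rather than flushing anything.
*/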