/*
 * linux/fs/jbd2/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * We cache the revoke status of a buffer in the current transaction in its
 * b_state bits.  As the name says, the revokevalid flag indicates that the
 * cached revoke status of a buffer is valid and we can rely on it.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 *
 * Locking rules:
 * We keep two hash tables of revoke records.  One hashtable belongs to the
 * running transaction (is pointed to by journal->j_revoke), the other one
 * belongs to the committing transaction.  Accesses to the second hash table
 * happen only from kjournald and no other thread touches this table.  Also
 * journal_switch_revoke_table(), which switches which hashtable belongs to
 * the running and which to the committing transaction, is called only from
 * kjournald.  Therefore we need no locks when accessing the hashtable
 * belonging to the committing transaction.
 *
 * All users operating on the hash table belonging to the running transaction
 * have a handle to the transaction.  Therefore they are safe from kjournald
 * switching hash tables under them.  For operations on the lists of entries
 * in the hash table, j_revoke_lock is used.
 *
 * Finally, the replay code also uses the hash tables, but at that point no
 * one else can touch them (the filesystem isn't mounted yet) and hence no
 * locking is needed.
 */
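
/*
 * Illustration (a sketch, not a code path lifted verbatim from this file):
 * the cached tri-state described above is consulted through the b_state
 * test helpers, roughly as journal_cancel_revoke() below does:
 *
 *	if (!buffer_revokevalid(bh))
 *		... cached status unknown: search the revoke hash table ...
 *	else if (buffer_revoked(bh))
 *		... block has been revoked in this transaction ...
 *	else
 *		... block is known not to be revoked; nothing to cancel ...
 */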

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/log2.h>
#endif

static lkmem_cache_t *jbd2_revoke_record_cache;
static lkmem_cache_t *jbd2_revoke_table_cache;

/* Each revoke record represents one single revoked block.  During
   journal replay, this involves recording the transaction ID of the
   last transaction to revoke this block. */

struct jbd2_revoke_record_s
{
	struct list_head	hash;
	tid_t			sequence;	/* Used for recovery only */
	unsigned long long	blocknr;
};


/* The revoke table is just a simple hash table of revoke records. */
struct jbd2_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int			hash_size;
	int			hash_shift;
	struct list_head	*hash_table;
};


#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
				    struct list_head *,
				    struct buffer_head **, int *,
				    struct jbd2_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
#endif

/* Utility functions to maintain the revoke table */

/* Borrowed from buffer.c: this is a tried and tested block hash function */
static inline int hash(journal_t *journal, unsigned long long block)
{
	struct jbd2_revoke_table_s *table = journal->j_revoke;
	int hash_shift = table->hash_shift;
	int hash = (int)block ^ (int)((block >> 31) >> 1);

	return ((hash << (hash_shift - 6)) ^
		(hash >> 13) ^
		(hash << (hash_shift - 12))) & (table->hash_size - 1);
}

static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
			      tid_t seq)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

repeat:
	record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
	if (!record)
		goto oom;

	record->sequence = seq;
	record->blocknr = blocknr;
	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
	spin_lock(&journal->j_revoke_lock);
	list_add(&record->hash, hash_list);
	spin_unlock(&journal->j_revoke_lock);
	return 0;

oom:
	if (!journal_oom_retry)
		return -ENOMEM;
	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
	yield();
	goto repeat;
}
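
/*
 * Both callers of insert_revoke_hash() appear later in this file: a running
 * transaction records its own transaction ID, while recovery records the ID
 * of the transaction being scanned.  Schematically:
 *
 *	insert_revoke_hash(journal, blocknr, handle->h_transaction->t_tid);
 *	insert_revoke_hash(journal, blocknr, sequence);	(during recovery)
 */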

/* Find a revoke record in the journal's hash table. */

static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
						unsigned long long blocknr)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];

	spin_lock(&journal->j_revoke_lock);
	record = (struct jbd2_revoke_record_s *) hash_list->next;
	while (&(record->hash) != hash_list) {
		if (record->blocknr == blocknr) {
			spin_unlock(&journal->j_revoke_lock);
			return record;
		}
		record = (struct jbd2_revoke_record_s *) record->hash.next;
	}
	spin_unlock(&journal->j_revoke_lock);
	return NULL;
}

void journal_destroy_revoke_caches(void)
{
	if (jbd2_revoke_record_cache) {
		kmem_cache_destroy(jbd2_revoke_record_cache);
		jbd2_revoke_record_cache = NULL;
	}
	if (jbd2_revoke_table_cache) {
		kmem_cache_destroy(jbd2_revoke_table_cache);
		jbd2_revoke_table_cache = NULL;
	}
}

int __init journal_init_revoke_caches(void)
{
	J_ASSERT(!jbd2_revoke_record_cache);
	J_ASSERT(!jbd2_revoke_table_cache);

	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
	if (!jbd2_revoke_record_cache)
		goto record_cache_failure;

	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
					SLAB_TEMPORARY);
	if (!jbd2_revoke_table_cache)
		goto table_cache_failure;
	return 0;

table_cache_failure:
	journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}

static struct jbd2_revoke_table_s *journal_init_revoke_table(int hash_size)
{
	int shift = 0;
	int tmp = hash_size;
	struct jbd2_revoke_table_s *table;

	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

	while ((tmp >>= 1UL) != 0UL)
		shift++;

	table->hash_size = hash_size;
	table->hash_shift = shift;
	table->hash_table =
		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(jbd2_revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (tmp = 0; tmp < hash_size; tmp++)
		INIT_LIST_HEAD(&table->hash_table[tmp]);

out:
	return table;
}

static void journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
	int i;
	struct list_head *hash_list;

	for (i = 0; i < table->hash_size; i++) {
		hash_list = &table->hash_table[i];
		J_ASSERT(list_empty(hash_list));
	}

	kfree(table->hash_table);
	kmem_cache_free(jbd2_revoke_table_cache, table);
}

/* Initialise the revoke table for a given journal to a given size. */
int journal_init_revoke(journal_t *journal, int hash_size)
{
	J_ASSERT(journal->j_revoke_table[0] == NULL);
	J_ASSERT(is_power_of_2(hash_size));

	journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[0])
		goto fail0;

	journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[1])
		goto fail1;

	journal->j_revoke = journal->j_revoke_table[1];

	spin_lock_init(&journal->j_revoke_lock);

	return 0;

fail1:
	journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
	return -ENOMEM;
}
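
/*
 * A typical caller sets the revoke tables up once, when the journal is
 * loaded.  For example (a sketch only --- the hash size is whatever power
 * of two the caller chooses):
 *
 *	err = journal_init_revoke(journal, 1024);
 *	if (err)
 *		return err;	// -ENOMEM
 */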

/* Destroy a journal's revoke table.  The table must already be empty! */
void journal_destroy_revoke(journal_t *journal)
{
	journal->j_revoke = NULL;
	if (journal->j_revoke_table[0])
		journal_destroy_revoke_table(journal->j_revoke_table[0]);
	if (journal->j_revoke_table[1])
		journal_destroy_revoke_table(journal->j_revoke_table[1]);
}


#ifdef __KERNEL__

/*
 * journal_revoke: revoke a given buffer_head from the journal.  This
 * prevents the block from being replayed during recovery if we take a
 * crash after this current transaction commits.  Any subsequent
 * metadata writes of the buffer in this transaction cancel the
 * revoke.
 *
 * Note that this call may block --- it is up to the caller to make
 * sure that there are no further calls to journal_write_metadata
 * before the revoke is complete.  In ext3, this implies calling the
 * revoke before clearing the block bitmap when we are deleting
 * metadata.
 *
 * Revoke performs a journal_forget on any buffer_head passed in as a
 * parameter, but does _not_ forget the buffer_head if the bh was only
 * found implicitly.
 *
 * bh_in may not be a journalled buffer - it may have come off
 * the hash tables without an attached journal_head.
 *
 * If bh_in is non-zero, journal_revoke() will decrement its b_count
 * by one.
 */

int journal_revoke(handle_t *handle, unsigned long long blocknr,
		   struct buffer_head *bh_in)
{
	struct buffer_head *bh = NULL;
	journal_t *journal;
	struct block_device *bdev;
	int err;

	might_sleep();
	if (bh_in)
		BUFFER_TRACE(bh_in, "enter");

	journal = handle->h_transaction->t_journal;
	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
		J_ASSERT (!"Cannot set revoke feature!");
		return -EINVAL;
	}

	bdev = journal->j_fs_dev;
	bh = bh_in;

	if (!bh) {
		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh)
			BUFFER_TRACE(bh, "found on hash");
	}
#ifdef JFS_EXPENSIVE_CHECKING
	else {
		struct buffer_head *bh2;

		/* If there is a different buffer_head lying around in
		 * memory anywhere... */
		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh2) {
			/* ... and it has RevokeValid status... */
			if (bh2 != bh && buffer_revokevalid(bh2))
				/* ...then it better be revoked too,
				 * since it's illegal to create a revoke
				 * record against a buffer_head which is
				 * not marked revoked --- that would
				 * risk missing a subsequent revoke
				 * cancel. */
				J_ASSERT_BH(bh2, buffer_revoked(bh2));
			put_bh(bh2);
		}
	}
#endif

	/* We really ought not ever to revoke twice in a row without
	   first having the revoke cancelled: it's illegal to free a
	   block twice without allocating it in between! */
	if (bh) {
		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
				 "inconsistent data on disk")) {
			if (!bh_in)
				brelse(bh);
			return -EIO;
		}
		set_buffer_revoked(bh);
		set_buffer_revokevalid(bh);
		if (bh_in) {
			BUFFER_TRACE(bh_in, "call journal_forget");
			journal_forget(handle, bh_in);
		} else {
			BUFFER_TRACE(bh, "call brelse");
			__brelse(bh);
		}
	}

	jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",
		  blocknr, bh_in);
	err = insert_revoke_hash(journal, blocknr,
				 handle->h_transaction->t_tid);
	BUFFER_TRACE(bh_in, "exit");
	return err;
}
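
/*
 * Sketch of a filesystem-side caller (not part of this file): ext3/ext4
 * style code revokes a metadata block before the bitmap update that frees
 * it, along the lines of
 *
 *	err = journal_revoke(handle, bh->b_blocknr, bh);
 *
 * so that stale journal copies of the block can never be replayed on top of
 * whatever reuses those blocks later.
 */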

/*
 * Cancel an outstanding revoke.  For use only internally by the
 * journaling code (called from journal_get_write_access).
 *
 * We trust buffer_revoked() on the buffer if the buffer is already
 * being journaled: if there is no revoke pending on the buffer, then we
 * don't do anything here.
 *
 * This would break if it were possible for a buffer to be revoked and
 * discarded, and then reallocated within the same transaction.  In such
 * a case we would have lost the revoked bit, but when we arrived here
 * the second time we would still have a pending revoke to cancel.  So,
 * do not trust the Revoked bit on buffers unless RevokeValid is also
 * set.
 */
int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
	struct jbd2_revoke_record_s *record;
	journal_t *journal = handle->h_transaction->t_journal;
	int need_cancel;
	int did_revoke = 0;	/* akpm: debug */
	struct buffer_head *bh = jh2bh(jh);

	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);

	/* Is the existing Revoke bit valid?  If so, we trust it, and
	 * only perform the full cancel if the revoke bit is set.  If
	 * not, we can't trust the revoke bit, and we need to do the
	 * full search for a revoke record. */
	if (test_set_buffer_revokevalid(bh)) {
		need_cancel = test_clear_buffer_revoked(bh);
	} else {
		need_cancel = 1;
		clear_buffer_revoked(bh);
	}

	if (need_cancel) {
		record = find_revoke_record(journal, bh->b_blocknr);
		if (record) {
			jbd_debug(4, "cancelled existing revoke on "
				  "blocknr %llu\n",
				  (unsigned long long)bh->b_blocknr);
			spin_lock(&journal->j_revoke_lock);
			list_del(&record->hash);
			spin_unlock(&journal->j_revoke_lock);
			kmem_cache_free(jbd2_revoke_record_cache, record);
			did_revoke = 1;
		}
	}

#ifdef JFS_EXPENSIVE_CHECKING
	/* There better not be one left behind by now! */
	record = find_revoke_record(journal, bh->b_blocknr);
	J_ASSERT_JH(jh, record == NULL);
#endif

	/* Finally, have we just cleared revoke on an unhashed
	 * buffer_head?  If so, we'd better make sure we clear the
	 * revoked status on any hashed alias too, otherwise the revoke
	 * state machine will get very upset later on. */
	if (need_cancel) {
		struct buffer_head *bh2;
		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
		if (bh2) {
			if (bh2 != bh)
				clear_buffer_revoked(bh2);
			__brelse(bh2);
		}
	}
	return did_revoke;
}
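
/*
 * In the jbd2 counterpart of this code the cancel is driven from the
 * get-write-access path, roughly as follows (a sketch; the helper name
 * do_get_write_access comes from jbd2 and may differ in this tree):
 *
 *	err = do_get_write_access(handle, jh, 0);
 *	if (!err)
 *		journal_cancel_revoke(handle, jh);
 */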

/*
 * jbd2_clear_buffer_revoked_flags clears the revoked flag of the buffers in
 * the revoke table, to reflect that there are no revoked buffers in the
 * next transaction which is going to be started.
 */
void jbd2_clear_buffer_revoked_flags(journal_t *journal)
{
	struct jbd2_revoke_table_s *revoke = journal->j_revoke;
	int i = 0;

	for (i = 0; i < revoke->hash_size; i++) {
		struct list_head *hash_list;
		struct list_head *list_entry;
		hash_list = &revoke->hash_table[i];

		list_for_each(list_entry, hash_list) {
			struct jbd2_revoke_record_s *record;
			struct buffer_head *bh;
			record = (struct jbd2_revoke_record_s *)list_entry;
			bh = __find_get_block(journal->j_fs_dev,
					      record->blocknr,
					      journal->j_blocksize);
			if (bh) {
				clear_buffer_revoked(bh);
				__brelse(bh);
			}
		}
	}
}

/* journal_switch_revoke_table selects j_revoke for the next transaction.
 * We do not want to suspend any processing until all revokes are
 * written -bzzz
 */
void journal_switch_revoke_table(journal_t *journal)
{
	int i;

	if (journal->j_revoke == journal->j_revoke_table[0])
		journal->j_revoke = journal->j_revoke_table[1];
	else
		journal->j_revoke = journal->j_revoke_table[0];

	for (i = 0; i < journal->j_revoke->hash_size; i++)
		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}

/*
 * Write revoke records to the journal for all entries in the current
 * revoke hash, deleting the entries as we go.
 */
void journal_write_revoke_records(journal_t *journal,
				  transaction_t *transaction,
				  struct list_head *log_bufs,
				  int write_op)
{
	struct buffer_head *descriptor;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;
	struct list_head *hash_list;
	int i, offset, count;

	descriptor = NULL;
	offset = 0;
	count = 0;

	/* select revoke table for committing transaction */
	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
		journal->j_revoke_table[1] : journal->j_revoke_table[0];

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];

		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *)
				hash_list->next;
			write_one_revoke_record(journal, transaction, log_bufs,
						&descriptor, &offset,
						record, write_op);
			count++;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
	if (descriptor)
		flush_descriptor(journal, descriptor, offset, write_op);
	jbd_debug(1, "Wrote %d revoke records\n", count);
}
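
/*
 * Commit-time ordering (a sketch based on the jbd2 commit code): the commit
 * thread first calls journal_switch_revoke_table() so that new revokes land
 * in the empty table, and only then calls
 *
 *	journal_write_revoke_records(journal, commit_transaction,
 *				     &log_bufs, write_op);
 *
 * on the table that now belongs to the committing transaction.  That is why
 * the function above deliberately picks "the other" table rather than
 * journal->j_revoke.
 */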

/*
 * Write out one revoke record.  We need to create a new descriptor
 * block if the old one is full or if we have not already created one.
 */

static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct list_head *log_bufs,
				    struct buffer_head **descriptorp,
				    int *offsetp,
				    struct jbd2_revoke_record_s *record,
				    int write_op)
{
	int csum_size = 0;
	struct buffer_head *descriptor;
	int sz, offset;
	journal_header_t *header;

	/* If we are already aborting, this all becomes a noop.  We
	   still need to go round the loop in
	   journal_write_revoke_records in order to free all of the
	   revoke records: only the IO to the journal is omitted. */
	if (is_journal_aborted(journal))
		return;

	descriptor = *descriptorp;
	offset = *offsetp;

	/* Do we need to leave space at the end for a checksum? */
	if (journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct journal_revoke_tail);

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
		sz = 8;
	else
		sz = 4;

	/* Make sure we have a descriptor with space left for the record */
	if (descriptor) {
		if (offset + sz > journal->j_blocksize - csum_size) {
			flush_descriptor(journal, descriptor, offset, write_op);
			descriptor = NULL;
		}
	}

	if (!descriptor) {
		descriptor = journal_get_descriptor_buffer(journal);
		if (!descriptor)
			return;
		header = (journal_header_t *)descriptor->b_data;
		header->h_magic = ext2fs_cpu_to_be32(JFS_MAGIC_NUMBER);
		header->h_blocktype = ext2fs_cpu_to_be32(JFS_REVOKE_BLOCK);
		header->h_sequence = ext2fs_cpu_to_be32(transaction->t_tid);

		/* Record it so that we can wait for IO completion later */
		BUFFER_TRACE(descriptor, "file in log_bufs");
		jbd2_file_log_bh(log_bufs, descriptor);

		offset = sizeof(journal_revoke_header_t);
		*descriptorp = descriptor;
	}

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
		* ((__be64 *)(&descriptor->b_data[offset])) =
			cpu_to_be64(record->blocknr);
	else
		* ((__be32 *)(&descriptor->b_data[offset])) =
			cpu_to_be32(record->blocknr);
	offset += sz;

	*offsetp = offset;
}

static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct journal_revoke_tail *tail;
	__u32 csum;

	if (!journal_has_csum_v2or3(j))
		return;

	tail = (struct journal_revoke_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct journal_revoke_tail));
	tail->r_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->r_checksum = ext2fs_cpu_to_be32(csum);
}

/*
 * Flush a revoke descriptor out to the journal.  If we are aborting,
 * this is a noop; otherwise we are generating a buffer which needs to
 * be waited for during commit, so it has to go onto the appropriate
 * journal buffer list.
 */

static void flush_descriptor(journal_t *journal,
			     struct buffer_head *descriptor,
			     int offset, int write_op)
{
	journal_revoke_header_t *header;

	if (is_journal_aborted(journal)) {
		put_bh(descriptor);
		return;
	}

	header = (journal_revoke_header_t *)descriptor->b_data;
	header->r_count = ext2fs_cpu_to_be32(offset);
	jbd2_revoke_csum_set(journal, descriptor);

	set_buffer_jwrite(descriptor);
	BUFFER_TRACE(descriptor, "write");
	set_buffer_dirty(descriptor);
	write_dirty_buffer(descriptor, write_op);
}
#endif
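
/*
 * For reference, a revoke descriptor block as emitted above looks like this
 * (layout reconstructed from write_one_revoke_record()/flush_descriptor()):
 *
 *	+--------------------------------+
 *	| journal_revoke_header_t        |  h_magic, h_blocktype =
 *	|                                |  JFS_REVOKE_BLOCK, h_sequence,
 *	|                                |  r_count = bytes used in block
 *	+--------------------------------+
 *	| array of block numbers         |  be32 or be64 entries, depending
 *	|                                |  on the 64BIT incompat feature
 *	+--------------------------------+
 *	| journal_revoke_tail (checksum) |  only when csum v2/v3 is enabled;
 *	|                                |  occupies the end of the block
 *	+--------------------------------+
 */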

/*
 * Revoke support for recovery.
 *
 * Recovery needs to be able to:
 *
 *  record all revoke records, including the tid of the latest instance
 *  of each revoke in the journal
 *
 *  check whether a given block in a given transaction should be replayed
 *  (ie. has not been revoked by a revoke record in that or a subsequent
 *  transaction)
 *
 *  empty the revoke table after recovery.
 */

/*
 * First, setting revoke records.  We create a new revoke record for
 * every block ever revoked in the log as we scan it for recovery, and
 * we update the existing records if we find multiple revokes for a
 * single block.
 */

int journal_set_revoke(journal_t *journal,
		       unsigned long long blocknr,
		       tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (record) {
		/* If we have multiple occurrences, only record the
		 * latest sequence number in the hashed record */
		if (tid_gt(sequence, record->sequence))
			record->sequence = sequence;
		return 0;
	}
	return insert_revoke_hash(journal, blocknr, sequence);
}

/*
 * Test revoke records.  For a given block referenced in the log, has
 * that block been revoked?  A revoke record with a given transaction
 * sequence number revokes all blocks in that transaction and earlier
 * ones, but later transactions still need to be replayed.
 */

int journal_test_revoke(journal_t *journal,
			unsigned long long blocknr,
			tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (!record)
		return 0;
	if (tid_gt(sequence, record->sequence))
		return 0;
	return 1;
}

/*
 * Finally, once recovery is over, we need to clear the revoke table so
 * that it can be reused by the running filesystem.
 */

void journal_clear_revoke(journal_t *journal)
{
	int i;
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;

	revoke = journal->j_revoke;

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];
		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *) hash_list->next;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
}
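
/*
 * Sketch of how recovery uses the three helpers above (the pass names are
 * those of the jbd2 recovery code and may differ in this tree):
 *
 *	PASS_REVOKE: for every entry in each revoke block found while
 *		scanning the log, call
 *			journal_set_revoke(journal, blocknr, sequence);
 *
 *	PASS_REPLAY: before replaying a journalled block, skip it if
 *			journal_test_revoke(journal, blocknr, sequence)
 *		returns true.
 *
 *	Once recovery completes, journal_clear_revoke(journal) empties the
 *	table so the mounted filesystem can reuse it.
 */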