objectid.c revision 09f1b80ba8c967b6e17c0516e95578d5da18115f
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/random.h>
#include <linux/time.h>
#include "reiserfs.h"

/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
			 (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
			 (__le32 *)((rs) + 1))

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 * map)
{
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
			       (long unsigned int)le32_to_cpu(map[0]));

	/* FIXME: add something else here */
}

#else
static void check_objectid_map(struct super_block *s, __le32 * map)
{;
}
#endif

/*
 * When we allocate objectids we allocate the first unused objectid.
 * Each sequence of objectids in use (the odd sequences) is followed
 * by a sequence of objectids not in use (the even sequences).  We
 * only need to record the last objectid in each of these sequences
 * (both the odd and even sequences) in order to fully define the
 * boundaries of the sequences.  A consequence of allocating the first
 * objectid not in use is that under most conditions this scheme is
 * extremely compact.  The exception is immediately after a sequence
 * of operations which deletes a large number of objects of
 * non-sequential objectids, and even then it will become compact
 * again as soon as more objects are created.  Note that many
 * interesting optimizations of layout could result from complicating
 * objectid assignment, but we have deferred making them for now.
 */
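/*
 * Worked example of the scheme above: suppose objectids 1..99 and
 * 101..499 are in use.  The map then holds the run boundaries
 *
 *	map[] = { 1, 100, 101, 500 }
 *
 * map[0] is always 1, map[1] (100) is the first unused objectid,
 * map[2] (101) starts the next in-use run and map[3] (500) is the
 * first unused objectid after it.  Allocating hands out 100 and bumps
 * map[1] to 101; map[1] now equals map[2], so the two in-use runs
 * merge and the map collapses to { 1, 500 }.
 */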

/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] holds the first unused objectid */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/*
	 * This incrementation allocates the first unused objectid. That
	 * is to say, the first entry on the objectid map is the first
	 * unused objectid, and by incrementing it we use it.  See below
	 * where we check to see if we eliminated a sequence of unused
	 * objectids....
	 */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/*
	 * Now we check to see if we eliminated the last remaining member of
	 * the first even sequence (and can eliminate the sequence by
	 * eliminating its last objectid from oids), and can collapse the
	 * first two odd sequences into one sequence.  If so, then the net
	 * result is to eliminate a pair of objectids from oids.  We do this
	 * by shifting the entire map to the left.
	 */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}

/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	/*return; */
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));

	/*
	 * start at the beginning of the objectid map (i = 0) and go to
	 * the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	 * what we use, though it is possible that binary search would be
	 * more efficient after performing lots of deletions (which is
	 * when oids is large.)  We only check even i's.
	 */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			le32_add_cpu(&map[i], 1);

			/*
			 * Did we unallocate the last member of an
			 * odd sequence, and can shrink oids?
			 */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/*
			 * JDM comparing two little-endian values for
			 * equality -- safe
			 */
			/*
			 * objectid map must be expanded, but
			 * there is no space
			 */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}
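
/*
 * Worked example for the release paths above: with map[] = { 1, 500 }
 * (objectids 1..499 in use), releasing 300 splits the run and the map
 * grows by a pair to { 1, 300, 301, 500 }.  Releasing 499 instead only
 * pulls in the end boundary of the run, giving { 1, 499 }.  If the map
 * is already at sb_oid_maxsize(), a split cannot be recorded and the
 * objectid is simply leaked (counted via PROC_INFO_INC).
 */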

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map, *new_objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *) (disk_sb_v1 + 1);
	new_objectid_map = (__le32 *) (disk_sb + 1);

	if (cur_size > new_size) {
		/*
		 * mark everyone used that was listed as free at
		 * the end of the objectid map
		 */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
	/* move the smaller objectid map past the end of the new super */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}
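
A minimal userspace sketch of the same boundary-array technique, for illustration only: plain unsigned ints stand in for the on-disk little-endian fields, a fixed array stands in for the space after the super block, and the names oid_alloc, oid_release and MAX_OIDS are made up for this sketch rather than kernel API. It reproduces the allocate, merge, split and shrink cases shown in the file above.

#include <stdio.h>

/*
 * Toy objectid map: map[0] is always 1, odd entries are "first unused
 * objectid after a run", even entries start the next in-use run.
 */
#define MAX_OIDS 16

static unsigned int map[MAX_OIDS] = { 1, 1 };	/* nothing in use yet */
static int cursize = 2;

/* allocate the first unused objectid (mirrors reiserfs_get_unused_objectid) */
static unsigned int oid_alloc(void)
{
	unsigned int oid = map[1];
	int i;

	map[1]++;
	if (cursize > 2 && map[1] == map[2]) {
		/* first free gap closed: merge the first two in-use runs */
		for (i = 1; i < cursize - 2; i++)
			map[i] = map[i + 2];
		cursize -= 2;
	}
	return oid;
}

/* release an objectid (mirrors reiserfs_release_objectid) */
static void oid_release(unsigned int oid)
{
	int i, j;

	for (i = 0; i < cursize; i += 2) {
		if (oid == map[i]) {
			map[i]++;
			if (map[i] == map[i + 1]) {
				/* run became empty: drop the pair */
				for (j = i; j < cursize - 2; j++)
					map[j] = map[j + 2];
				cursize -= 2;
			}
			return;
		}
		if (oid > map[i] && oid < map[i + 1]) {
			if (oid + 1 == map[i + 1]) {
				/* last objectid of the run */
				map[i + 1]--;
				return;
			}
			if (cursize == MAX_OIDS)
				return;	/* no room to split: leak it */
			/* split the run: insert the pair (oid, oid + 1) */
			for (j = cursize + 1; j > i + 2; j--)
				map[j] = map[j - 2];
			map[i + 1] = oid;
			map[i + 2] = oid + 1;
			cursize += 2;
			return;
		}
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		oid_alloc();		/* use objectids 1..6 */
	oid_release(3);			/* punch a hole in the middle */

	for (i = 0; i < cursize; i++)
		printf("%u ", map[i]);	/* prints: 1 3 4 7 */
	printf("\n");
	return 0;
}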