mount.c revision 3c828a4ef8d7efbc226ac3643b2d7746b121a78e
1/**
2 * mount.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 *             http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include "fsck.h"
12#include <locale.h>
13
14u32 get_free_segments(struct f2fs_sb_info *sbi)
15{
16	u32 i, free_segs = 0;
17
18	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
19		struct seg_entry *se = get_seg_entry(sbi, i);
20
21		if (se->valid_blocks == 0x0 &&
22				!IS_CUR_SEGNO(sbi, i, NO_CHECK_TYPE))
23			free_segs++;
24	}
25	return free_segs;
26}
27
/*
 * Print a single-line progress indicator (carriage return, no newline)
 * showing a spinner character and the current free-segment count.
 * The spinner advances one step per call via a static counter.
 */
void update_free_segments(struct f2fs_sb_info *sbi)
{
	static int call_count;
	char *progress = "-*|*-";
	char spinner = progress[call_count % 5];

	MSG(0, "\r [ %c ] Free segments: 0x%x", spinner, get_free_segments(sbi));
	fflush(stdout);
	call_count++;
}
37
/*
 * Dump an on-disk f2fs inode.
 *
 * @inode: raw inode image (little-endian on-disk layout).
 * @name:  when non-zero and the inode has a name, print only a short
 *         name/size summary and return; otherwise dump every field.
 */
void print_inode_info(struct f2fs_inode *inode, int name)
{
	unsigned char en[F2FS_NAME_LEN + 1];
	unsigned int i = 0;
	int namelen = le32_to_cpu(inode->i_namelen);
	int enc_name = file_enc_name(inode);
	/* offset of the first data-block slot past any extra attrs */
	int ofs = __get_extra_isize(inode);

	/* decode (possibly encrypted) name into a printable buffer */
	namelen = convert_encrypted_name(inode->i_name, namelen, en, enc_name);
	en[namelen] = '\0';
	if (name && namelen) {
		/* short form: name + human-readable (thousands-grouped) size */
		inode->i_name[namelen] = '\0';
		MSG(0, " - File name         : %s%s\n", en,
				enc_name ? " <encrypted>" : "");
		setlocale(LC_ALL, "");
		MSG(0, " - File size         : %'llu (bytes)\n",
				le64_to_cpu(inode->i_size));
		return;
	}

	/* full field dump below */
	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_advise);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);
	DISP_u32(inode, i_dir_level);

	if (namelen) {
		DISP_u32(inode, i_namelen);
		printf("%-30s\t\t[%s]\n", "i_name", en);
	}

	/* cached extent: file offset, start block address, length */
	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	DISP_u16(inode, i_extra_isize);
	DISP_u16(inode, i_inline_xattr_size);
	DISP_u32(inode, i_projid);
	DISP_u32(inode, i_inode_checksum);

	DISP_u32(inode, i_addr[ofs]);		/* Pointers to data blocks */
	DISP_u32(inode, i_addr[ofs + 1]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[ofs + 2]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[ofs + 3]);	/* Pointers to data blocks */

	/* remaining direct pointers, stopping at the first hole */
	for (i = ofs + 3; i < ADDRS_PER_INODE(inode); i++) {
		if (inode->i_addr[i] == 0x0)
			break;
		printf("i_addr[0x%x] points data block\t\t[0x%4x]\n",
				i, le32_to_cpu(inode->i_addr[i]));
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	printf("\n");
}
116
117void print_node_info(struct f2fs_node *node_block, int verbose)
118{
119	nid_t ino = le32_to_cpu(node_block->footer.ino);
120	nid_t nid = le32_to_cpu(node_block->footer.nid);
121	/* Is this inode? */
122	if (ino == nid) {
123		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
124		print_inode_info(&node_block->i, verbose);
125	} else {
126		int i;
127		u32 *dump_blk = (u32 *)node_block;
128		DBG(verbose,
129			"Node ID [0x%x:%u] is direct node or indirect node.\n",
130								nid, nid);
131		for (i = 0; i <= 10; i++)
132			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
133						i, dump_blk[i], dump_blk[i]);
134	}
135}
136
137static void DISP_label(u_int16_t *name)
138{
139	char buffer[MAX_VOLUME_NAME];
140
141	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
142	printf("%-30s" "\t\t[%s]\n", "volum_name", buffer);
143}
144
/*
 * Dump the raw superblock fields.  No-op unless a debug level has been
 * requested (c.dbg_lv).  Output layout comes from the DISP_* macros.
 */
void print_raw_sb_info(struct f2fs_super_block *sb)
{
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);

	DISP_label(sb->volume_name);

	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	/* start addresses of the metadata areas, in layout order */
	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	DISP("%s", sb, version);
	printf("\n");
}
194
/*
 * Dump the current checkpoint's fields.  No-op unless a debug level has
 * been requested (c.dbg_lv).
 */
void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	/* active node logs: allocation type, segment number, block offset */
	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);


	/* active data logs: allocation type, segment number, block offset */
	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}
251
252void print_cp_state(u32 flag)
253{
254	MSG(0, "Info: checkpoint state = %x : ", flag);
255	if (flag & CP_FSCK_FLAG)
256		MSG(0, "%s", " fsck");
257	if (flag & CP_ERROR_FLAG)
258		MSG(0, "%s", " error");
259	if (flag & CP_COMPACT_SUM_FLAG)
260		MSG(0, "%s", " compacted_summary");
261	if (flag & CP_ORPHAN_PRESENT_FLAG)
262		MSG(0, "%s", " orphan_inodes");
263	if (flag & CP_FASTBOOT_FLAG)
264		MSG(0, "%s", " fastboot");
265	if (flag & CP_NAT_BITS_FLAG)
266		MSG(0, "%s", " nat_bits");
267	if (flag & CP_TRIMMED_FLAG)
268		MSG(0, "%s", " trimmed");
269	if (flag & CP_UMOUNT_FLAG)
270		MSG(0, "%s", " unmount");
271	else
272		MSG(0, "%s", " sudden-power-off");
273	MSG(0, "\n");
274}
275
276void print_sb_state(struct f2fs_super_block *sb)
277{
278	__le32 f = sb->feature;
279	int i;
280
281	MSG(0, "Info: superblock features = %x : ", f);
282	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
283		MSG(0, "%s", " encrypt");
284	}
285	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
286		MSG(0, "%s", " zoned block device");
287	}
288	if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
289		MSG(0, "%s", " extra attribute");
290	}
291	if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
292		MSG(0, "%s", " project quota");
293	}
294	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
295		MSG(0, "%s", " inode checksum");
296	}
297	if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
298		MSG(0, "%s", " flexible inline xattr");
299	}
300	if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
301		MSG(0, "%s", " quota ino");
302	}
303	MSG(0, "\n");
304	MSG(0, "Info: superblock encrypt level = %d, salt = ",
305					sb->encryption_level);
306	for (i = 0; i < 16; i++)
307		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
308	MSG(0, "\n");
309}
310
/*
 * Verify that the metadata areas recorded in the superblock tile the
 * device exactly: CP, SIT, NAT, SSA and MAIN must be contiguous and in
 * that order, each starting where the previous one ends.
 *
 * @sb:     superblock to check (and possibly fix on disk).
 * @offset: byte offset of this superblock copy, used for the rewrite.
 *
 * Returns 0 if the layout is consistent (possibly after fixing a
 * too-large segment_count in place), -1 on any unfixable mismatch.
 */
static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
							u64 offset)
{
	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
	u32 cp_blkaddr = get_sb(cp_blkaddr);
	u32 sit_blkaddr = get_sb(sit_blkaddr);
	u32 nat_blkaddr = get_sb(nat_blkaddr);
	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
	u32 main_blkaddr = get_sb(main_blkaddr);
	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
	u32 segment_count_sit = get_sb(segment_count_sit);
	u32 segment_count_nat = get_sb(segment_count_nat);
	u32 segment_count_ssa = get_sb(segment_count_ssa);
	u32 segment_count_main = get_sb(segment_count_main);
	u32 segment_count = get_sb(segment_count);
	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
	/* first block past the MAIN area / past the whole volume */
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	/* the CP area must start at segment 0 */
	if (segment0_blkaddr != cp_blkaddr) {
		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
				segment0_blkaddr, cp_blkaddr);
		return -1;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return -1;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return -1;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return -1;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return -1;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		/* MAIN overruns the volume: cannot be fixed here */
		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return -1;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		/*
		 * segment_count claims more space than MAIN uses: shrink
		 * it to match and write the corrected superblock back.
		 */
		int err;

		set_sb(segment_count, (main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		err = dev_write(sb, offset, sizeof(struct f2fs_super_block));
		MSG(0, "Info: Fix alignment: %s, start(%u) end(%u) block(%u)\n",
			err ? "failed": "done",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}
	return 0;
}
393
/*
 * Basic sanity checks on a raw superblock image: magic number, block
 * and sector geometry, reserved inode numbers, zoned-device feature,
 * segment count bound, and finally the metadata area boundaries.
 *
 * Returns 0 if the superblock looks valid, -1 otherwise.
 */
int sanity_check_raw_super(struct f2fs_super_block *sb, u64 offset)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != get_sb(magic))
		return -1;

	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE)
		return -1;

	blocksize = 1 << get_sb(log_blocksize);
	if (F2FS_BLKSIZE != blocksize)
		return -1;

	/* check log blocks per segment */
	if (get_sb(log_blocks_per_seg) != 9)
		return -1;

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE)
		return -1;

	/* sectors per block and sector size must multiply to block size */
	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
						F2FS_MAX_LOG_SECTOR_SIZE)
		return -1;

	/* check reserved ino info */
	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
					get_sb(root_ino) != 3)
		return -1;

	/* Check zoned block device feature */
	if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
		MSG(0, "\tMissing zoned block device feature\n");
		return -1;
	}

	if (get_sb(segment_count) > F2FS_MAX_SEGMENT)
		return -1;

	if (sanity_check_area_boundary(sb, offset))
		return -1;
	return 0;
}
440
441int validate_super_block(struct f2fs_sb_info *sbi, int block)
442{
443	u64 offset;
444
445	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
446
447	if (block == 0)
448		offset = F2FS_SUPER_OFFSET;
449	else
450		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;
451
452	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
453		return -1;
454
455	if (!sanity_check_raw_super(sbi->raw_super, offset)) {
456		/* get kernel version */
457		if (c.kd >= 0) {
458			dev_read_version(c.version, 0, VERSION_LEN);
459			get_kernel_version(c.version);
460		} else {
461			memset(c.version, 0, VERSION_LEN);
462		}
463
464		/* build sb version */
465		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
466		get_kernel_version(c.sb_version);
467		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
468		get_kernel_version(c.init_version);
469
470		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
471		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
472					c.sb_version, c.version);
473		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
474			int ret;
475
476			memcpy(sbi->raw_super->version,
477						c.version, VERSION_LEN);
478			ret = dev_write(sbi->raw_super, offset,
479					sizeof(struct f2fs_super_block));
480			ASSERT(ret >= 0);
481
482			c.auto_fix = 0;
483			c.fix_on = 1;
484		}
485		print_sb_state(sbi->raw_super);
486		return 0;
487	}
488
489	free(sbi->raw_super);
490	sbi->raw_super = NULL;
491	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);
492
493	return -EINVAL;
494}
495
/*
 * Populate sbi's cached geometry from the validated superblock and
 * discover any additional devices of a multi-device volume, assigning
 * each its contiguous block-address range.  Always returns 0.
 */
int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	u64 total_sectors;
	int i;

	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
	sbi->log_blocksize = get_sb(log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = get_sb(segs_per_sec);
	sbi->secs_per_zone = get_sb(secs_per_zone);
	sbi->total_sections = get_sb(section_count);
	/* NAT segments come in pairs, hence the divide by 2 */
	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = get_sb(root_ino);
	sbi->node_ino_num = get_sb(node_ino);
	sbi->meta_ino_num = get_sb(meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;

	for (i = 0; i < MAX_DEVICES; i++) {
		/* an empty path terminates the device table */
		if (!sb->devs[i].path[0])
			break;

		if (i) {
			/* secondary devices are opened here from the sb */
			c.devices[i].path = strdup((char *)sb->devs[i].path);
			if (get_device_info(i))
				ASSERT(0);
		} else {
			/* device 0 was opened earlier; paths must agree */
			ASSERT(!strcmp((char *)sb->devs[i].path,
						(char *)c.devices[i].path));
		}

		c.devices[i].total_segments =
			le32_to_cpu(sb->devs[i].total_segments);
		/* each device's range starts right after the previous one */
		if (i)
			c.devices[i].start_blkaddr =
				c.devices[i - 1].end_blkaddr + 1;
		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
			c.devices[i].total_segments *
			c.blks_per_seg - 1;
		if (i == 0)
			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);

		c.ndevs = i + 1;
		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
				i, c.devices[i].path,
				c.devices[i].start_blkaddr,
				c.devices[i].end_blkaddr);
	}

	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
				total_sectors, total_sectors >>
						(20 - get_sb(log_sectorsize)));
	return 0;
}
554
555void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
556				unsigned long long *version)
557{
558	void *cp_page_1, *cp_page_2;
559	struct f2fs_checkpoint *cp;
560	unsigned long blk_size = sbi->blocksize;
561	unsigned long long cur_version = 0, pre_version = 0;
562	unsigned int crc = 0;
563	size_t crc_offset;
564
565	/* Read the 1st cp block in this CP pack */
566	cp_page_1 = malloc(PAGE_SIZE);
567	if (dev_read_block(cp_page_1, cp_addr) < 0)
568		goto invalid_cp1;
569
570	cp = (struct f2fs_checkpoint *)cp_page_1;
571	crc_offset = get_cp(checksum_offset);
572	if (crc_offset > (blk_size - sizeof(__le32)))
573		goto invalid_cp1;
574
575	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
576	if (f2fs_crc_valid(crc, cp, crc_offset))
577		goto invalid_cp1;
578
579	pre_version = get_cp(checkpoint_ver);
580
581	/* Read the 2nd cp block in this CP pack */
582	cp_page_2 = malloc(PAGE_SIZE);
583	cp_addr += get_cp(cp_pack_total_block_count) - 1;
584
585	if (dev_read_block(cp_page_2, cp_addr) < 0)
586		goto invalid_cp2;
587
588	cp = (struct f2fs_checkpoint *)cp_page_2;
589	crc_offset = get_cp(checksum_offset);
590	if (crc_offset > (blk_size - sizeof(__le32)))
591		goto invalid_cp2;
592
593	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
594	if (f2fs_crc_valid(crc, cp, crc_offset))
595		goto invalid_cp2;
596
597	cur_version = get_cp(checkpoint_ver);
598
599	if (cur_version == pre_version) {
600		*version = cur_version;
601		free(cp_page_2);
602		return cp_page_1;
603	}
604
605invalid_cp2:
606	free(cp_page_2);
607invalid_cp1:
608	free(cp_page_1);
609	return NULL;
610}
611
612int get_valid_checkpoint(struct f2fs_sb_info *sbi)
613{
614	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
615	void *cp1, *cp2, *cur_page;
616	unsigned long blk_size = sbi->blocksize;
617	unsigned long long cp1_version = 0, cp2_version = 0, version;
618	unsigned long long cp_start_blk_no;
619	unsigned int cp_payload, cp_blks;
620	int ret;
621
622	cp_payload = get_sb(cp_payload);
623	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
624		return -EINVAL;
625
626	cp_blks = 1 + cp_payload;
627	sbi->ckpt = malloc(cp_blks * blk_size);
628	if (!sbi->ckpt)
629		return -ENOMEM;
630	/*
631	 * Finding out valid cp block involves read both
632	 * sets( cp pack1 and cp pack 2)
633	 */
634	cp_start_blk_no = get_sb(cp_blkaddr);
635	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
636
637	/* The second checkpoint pack should start at the next segment */
638	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
639	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
640
641	if (cp1 && cp2) {
642		if (ver_after(cp2_version, cp1_version)) {
643			cur_page = cp2;
644			sbi->cur_cp = 2;
645			version = cp2_version;
646		} else {
647			cur_page = cp1;
648			sbi->cur_cp = 1;
649			version = cp1_version;
650		}
651	} else if (cp1) {
652		cur_page = cp1;
653		sbi->cur_cp = 1;
654		version = cp1_version;
655	} else if (cp2) {
656		cur_page = cp2;
657		sbi->cur_cp = 2;
658		version = cp2_version;
659	} else
660		goto fail_no_cp;
661
662	MSG(0, "Info: CKPT version = %llx\n", version);
663
664	memcpy(sbi->ckpt, cur_page, blk_size);
665
666	if (cp_blks > 1) {
667		unsigned int i;
668		unsigned long long cp_blk_no;
669
670		cp_blk_no = get_sb(cp_blkaddr);
671		if (cur_page == cp2)
672			cp_blk_no += 1 << get_sb(log_blocks_per_seg);
673
674		/* copy sit bitmap */
675		for (i = 1; i < cp_blks; i++) {
676			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
677			ret = dev_read_block(cur_page, cp_blk_no + i);
678			ASSERT(ret >= 0);
679			memcpy(ckpt + i * blk_size, cur_page, blk_size);
680		}
681	}
682	if (cp1)
683		free(cp1);
684	if (cp2)
685		free(cp2);
686	return 0;
687
688fail_no_cp:
689	free(sbi->ckpt);
690	sbi->ckpt = NULL;
691	return -EINVAL;
692}
693
694int sanity_check_ckpt(struct f2fs_sb_info *sbi)
695{
696	unsigned int total, fsmeta;
697	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
698	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
699
700	total = get_sb(segment_count);
701	fsmeta = get_sb(segment_count_ckpt);
702	fsmeta += get_sb(segment_count_sit);
703	fsmeta += get_sb(segment_count_nat);
704	fsmeta += get_cp(rsvd_segment_count);
705	fsmeta += get_sb(segment_count_ssa);
706
707	if (fsmeta >= total)
708		return 1;
709
710	return 0;
711}
712
/*
 * Compute the on-disk block address of the NAT block covering nid
 * `start`.  NAT segments are laid out in pairs; the version bitmap in
 * the checkpoint tells which copy of each block is the current one.
 */
static pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	/* pair base: each logical NAT segment occupies two on-disk ones */
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) -1)));

	/* bit set means the second copy of the pair is current */
	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}
732
/*
 * Build the in-use nid bitmap by scanning every on-disk NAT block and
 * then overlaying the not-yet-flushed NAT journal entries from the hot
 * data curseg.  Only needed for SLOAD/FSCK runs.
 *
 * Returns 0 on success (or when skipped), -ENOMEM on allocation
 * failure.
 */
static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_journal *journal = &sum->journal;
	struct f2fs_nat_block nat_block;
	block_t start_blk;
	nid_t nid;
	int i;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
	if (!nm_i->nid_bitmap)
		return -ENOMEM;

	/* arbitrarily set 0 bit */
	f2fs_set_bit(0, nm_i->nid_bitmap);

	memset((void *)&nat_block, 0, sizeof(struct f2fs_nat_block));

	for (nid = 0; nid < nm_i->max_nid; nid++) {
		/* load the next NAT block each time we cross its boundary */
		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
			int ret;

			start_blk = current_nat_addr(sbi, nid);
			ret = dev_read_block((void *)&nat_block, start_blk);
			ASSERT(ret >= 0);
		}

		if (nat_block.entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}

	/* journal entries override the on-disk NAT state */
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr != NULL_ADDR)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}
	return 0;
}
780
781u32 update_nat_bits_flags(struct f2fs_super_block *sb,
782				struct f2fs_checkpoint *cp, u32 flags)
783{
784	u_int32_t nat_bits_bytes, nat_bits_blocks;
785
786	nat_bits_bytes = get_sb(segment_count_nat) << 5;
787	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
788						F2FS_BLKSIZE - 1);
789	if (get_cp(cp_pack_total_block_count) <=
790			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
791		flags |= CP_NAT_BITS_FLAG;
792	else
793		flags &= (~CP_NAT_BITS_FLAG);
794
795	return flags;
796}
797
/*
 * Build and write the nat_bits area for checkpoint pack `set`.
 *
 * Scans every current NAT block and records, per block, whether it is
 * completely full or completely empty; the two bitmaps (prefixed with
 * the checkpoint CRC) are written at the tail of the chosen CP segment.
 *
 * Note: callers should flush_journal_entries() before this, so the
 * on-disk NAT blocks reflect all journaled updates.
 */
void write_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	u_int32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	u_int32_t nat_bits_bytes = nat_blocks >> 3;
	/* 8 bytes of CRC header + full bitmap + empty bitmap, in blocks */
	u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
					8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct f2fs_nat_block *nat_block;
	u_int32_t i, j;
	block_t blkaddr;
	int ret;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	nat_block = malloc(F2FS_BLKSIZE);
	ASSERT(nat_block);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	memset(full_nat_bits, 0, nat_bits_bytes);
	memset(empty_nat_bits, 0, nat_bits_bytes);

	for (i = 0; i < nat_blocks; i++) {
		int seg_off = i >> get_sb(log_blocks_per_seg);
		int valid = 0;

		/* address of the current copy of NAT block i (paired segs) */
		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
				(seg_off << get_sb(log_blocks_per_seg) << 1) +
				(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));

		if (f2fs_test_bit(i, nm_i->nat_bitmap))
			blkaddr += (1 << get_sb(log_blocks_per_seg));

		ret = dev_read_block(nat_block, blkaddr);
		ASSERT(ret >= 0);

		/* entry (0,0) is the reserved nid 0 and always counts */
		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if ((i == 0 && j == 0) ||
				nat_block->entries[j].block_addr != NULL_ADDR)
				valid++;
		}
		if (valid == 0)
			test_and_set_bit_le(i, empty_nat_bits);
		else if (valid == NAT_ENTRY_PER_BLOCK)
			test_and_set_bit_le(i, full_nat_bits);
	}
	*(__le64 *)nat_bits = get_cp_crc(cp);
	free(nat_block);

	/* nat_bits occupy the last blocks of the selected CP segment */
	blkaddr = get_sb(segment0_blkaddr) + (set <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
	}
	MSG(0, "Info: Write valid nat_bits in checkpoint\n");

	free(nat_bits);
}
866
867int init_node_manager(struct f2fs_sb_info *sbi)
868{
869	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
870	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
871	struct f2fs_nm_info *nm_i = NM_I(sbi);
872	unsigned char *version_bitmap;
873	unsigned int nat_segs, nat_blocks;
874
875	nm_i->nat_blkaddr = get_sb(nat_blkaddr);
876
877	/* segment_count_nat includes pair segment so divide to 2. */
878	nat_segs = get_sb(segment_count_nat) >> 1;
879	nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
880	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
881	nm_i->fcnt = 0;
882	nm_i->nat_cnt = 0;
883	nm_i->init_scan_nid = get_cp(next_free_nid);
884	nm_i->next_scan_nid = get_cp(next_free_nid);
885
886	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
887
888	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
889	if (!nm_i->nat_bitmap)
890		return -ENOMEM;
891	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
892	if (!version_bitmap)
893		return -EFAULT;
894
895	/* copy version bitmap */
896	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
897	return f2fs_init_nid_bitmap(sbi);
898}
899
900int build_node_manager(struct f2fs_sb_info *sbi)
901{
902	int err;
903	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
904	if (!sbi->nm_info)
905		return -ENOMEM;
906
907	err = init_node_manager(sbi);
908	if (err)
909		return err;
910
911	return 0;
912}
913
914int build_sit_info(struct f2fs_sb_info *sbi)
915{
916	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
917	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
918	struct sit_info *sit_i;
919	unsigned int sit_segs, start;
920	char *src_bitmap, *dst_bitmap;
921	unsigned int bitmap_size;
922
923	sit_i = malloc(sizeof(struct sit_info));
924	if (!sit_i)
925		return -ENOMEM;
926
927	SM_I(sbi)->sit_info = sit_i;
928
929	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
930	if (!sit_i->sentries)
931		return -ENOMEM;
932
933	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
934		sit_i->sentries[start].cur_valid_map
935			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
936		sit_i->sentries[start].ckpt_valid_map
937			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
938		if (!sit_i->sentries[start].cur_valid_map
939				|| !sit_i->sentries[start].ckpt_valid_map)
940			return -ENOMEM;
941	}
942
943	sit_segs = get_sb(segment_count_sit) >> 1;
944	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
945	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
946
947	dst_bitmap = malloc(bitmap_size);
948	memcpy(dst_bitmap, src_bitmap, bitmap_size);
949
950	sit_i->sit_base_addr = get_sb(sit_blkaddr);
951	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
952	sit_i->written_valid_blocks = get_cp(valid_block_count);
953	sit_i->sit_bitmap = dst_bitmap;
954	sit_i->bitmap_size = bitmap_size;
955	sit_i->dirty_sentries = 0;
956	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
957	sit_i->elapsed_time = get_cp(elapsed_time);
958	return 0;
959}
960
961void reset_curseg(struct f2fs_sb_info *sbi, int type)
962{
963	struct curseg_info *curseg = CURSEG_I(sbi, type);
964	struct summary_footer *sum_footer;
965	struct seg_entry *se;
966
967	sum_footer = &(curseg->sum_blk->footer);
968	memset(sum_footer, 0, sizeof(struct summary_footer));
969	if (IS_DATASEG(type))
970		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
971	if (IS_NODESEG(type))
972		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
973	se = get_seg_entry(sbi, curseg->segno);
974	se->type = type;
975}
976
/*
 * Load the compacted summary area written at checkpoint time: the NAT
 * and SIT journals come first, then the per-block summary entries of
 * the three data cursegs, packed back-to-back across as many blocks as
 * needed.
 */
static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int i, j, offset;
	block_t start;
	char *kaddr;
	int ret;

	start = start_sum_block(sbi);

	kaddr = (char *)malloc(PAGE_SIZE);
	ret = dev_read_block(kaddr, start++);
	ASSERT(ret >= 0);

	/* NAT journal lives in the hot data curseg's summary block */
	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* SIT journal lives in the cold data curseg's summary block */
	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);

	offset = 2 * SUM_JOURNAL_SIZE;
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		reset_curseg(sbi, i);

		/* SSR segments carry a full segment's worth of entries */
		if (curseg->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;
		else
			blk_off = curseg->next_blkoff;

		ASSERT(blk_off <= ENTRIES_IN_SUM);

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			curseg->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			/* roll over to the next summary block when full */
			if (offset + SUMMARY_SIZE <=
					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
				continue;
			memset(kaddr, 0, PAGE_SIZE);
			ret = dev_read_block(kaddr, start++);
			ASSERT(ret >= 0);
			offset = 0;
		}
	}
	free(kaddr);
}
1028
1029static void restore_node_summary(struct f2fs_sb_info *sbi,
1030		unsigned int segno, struct f2fs_summary_block *sum_blk)
1031{
1032	struct f2fs_node *node_blk;
1033	struct f2fs_summary *sum_entry;
1034	block_t addr;
1035	unsigned int i;
1036	int ret;
1037
1038	node_blk = malloc(F2FS_BLKSIZE);
1039	ASSERT(node_blk);
1040
1041	/* scan the node segment */
1042	addr = START_BLOCK(sbi, segno);
1043	sum_entry = &sum_blk->entries[0];
1044
1045	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
1046		ret = dev_read_block(node_blk, addr);
1047		ASSERT(ret >= 0);
1048		sum_entry->nid = node_blk->footer.nid;
1049		addr++;
1050	}
1051	free(node_blk);
1052}
1053
1054static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1055{
1056	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1057	struct f2fs_summary_block *sum_blk;
1058	struct curseg_info *curseg;
1059	unsigned int segno = 0;
1060	block_t blk_addr = 0;
1061	int ret;
1062
1063	if (IS_DATASEG(type)) {
1064		segno = get_cp(cur_data_segno[type]);
1065		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1066			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1067		else
1068			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1069	} else {
1070		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
1071		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1072			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1073							type - CURSEG_HOT_NODE);
1074		else
1075			blk_addr = GET_SUM_BLKADDR(sbi, segno);
1076	}
1077
1078	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
1079	ret = dev_read_block(sum_blk, blk_addr);
1080	ASSERT(ret >= 0);
1081
1082	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1083		restore_node_summary(sbi, segno, sum_blk);
1084
1085	curseg = CURSEG_I(sbi, type);
1086	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
1087	reset_curseg(sbi, type);
1088	free(sum_blk);
1089}
1090
1091void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
1092					struct f2fs_summary *sum)
1093{
1094	struct f2fs_summary_block *sum_blk;
1095	u32 segno, offset;
1096	int type, ret;
1097	struct seg_entry *se;
1098
1099	segno = GET_SEGNO(sbi, blk_addr);
1100	offset = OFFSET_IN_SEG(sbi, blk_addr);
1101
1102	se = get_seg_entry(sbi, segno);
1103
1104	sum_blk = get_sum_block(sbi, segno, &type);
1105	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
1106	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
1107							SUM_TYPE_DATA;
1108
1109	/* write SSA all the time */
1110	ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
1111	ASSERT(ret >= 0);
1112
1113	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1114					type == SEG_TYPE_MAX)
1115		free(sum_blk);
1116}
1117
1118static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
1119{
1120	int type = CURSEG_HOT_DATA;
1121
1122	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1123		read_compacted_summaries(sbi);
1124		type = CURSEG_HOT_NODE;
1125	}
1126
1127	for (; type <= CURSEG_COLD_NODE; type++)
1128		read_normal_summaries(sbi, type);
1129}
1130
1131static void build_curseg(struct f2fs_sb_info *sbi)
1132{
1133	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1134	struct curseg_info *array;
1135	unsigned short blk_off;
1136	unsigned int segno;
1137	int i;
1138
1139	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
1140	ASSERT(array);
1141
1142	SM_I(sbi)->curseg_array = array;
1143
1144	for (i = 0; i < NR_CURSEG_TYPE; i++) {
1145		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
1146		ASSERT(array[i].sum_blk);
1147		if (i <= CURSEG_COLD_DATA) {
1148			blk_off = get_cp(cur_data_blkoff[i]);
1149			segno = get_cp(cur_data_segno[i]);
1150		}
1151		if (i > CURSEG_COLD_DATA) {
1152			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
1153			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
1154		}
1155		ASSERT(segno < TOTAL_SEGS(sbi));
1156		ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
1157
1158		array[i].segno = segno;
1159		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
1160		array[i].next_segno = NULL_SEGNO;
1161		array[i].next_blkoff = blk_off;
1162		array[i].alloc_type = cp->alloc_type[i];
1163	}
1164	restore_curseg_summaries(sbi);
1165}
1166
1167static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
1168{
1169	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
1170	ASSERT(segno <= end_segno);
1171}
1172
1173struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
1174						unsigned int segno)
1175{
1176	struct sit_info *sit_i = SIT_I(sbi);
1177	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1178	block_t blk_addr = sit_i->sit_base_addr + offset;
1179	struct f2fs_sit_block *sit_blk;
1180	int ret;
1181
1182	sit_blk = calloc(BLOCK_SZ, 1);
1183	ASSERT(sit_blk);
1184	check_seg_range(sbi, segno);
1185
1186	/* calculate sit block address */
1187	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1188		blk_addr += sit_i->sit_blocks;
1189
1190	ret = dev_read_block(sit_blk, blk_addr);
1191	ASSERT(ret >= 0);
1192
1193	return sit_blk;
1194}
1195
1196void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
1197			unsigned int segno, struct f2fs_sit_block *sit_blk)
1198{
1199	struct sit_info *sit_i = SIT_I(sbi);
1200	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1201	block_t blk_addr = sit_i->sit_base_addr + offset;
1202	int ret;
1203
1204	/* calculate sit block address */
1205	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1206		blk_addr += sit_i->sit_blocks;
1207
1208	ret = dev_write_block(sit_blk, blk_addr);
1209	ASSERT(ret >= 0);
1210}
1211
1212void check_block_count(struct f2fs_sb_info *sbi,
1213		unsigned int segno, struct f2fs_sit_entry *raw_sit)
1214{
1215	struct f2fs_sm_info *sm_info = SM_I(sbi);
1216	unsigned int end_segno = sm_info->segment_count - 1;
1217	int valid_blocks = 0;
1218	unsigned int i;
1219
1220	/* check segment usage */
1221	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
1222		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
1223				segno, GET_SIT_VBLOCKS(raw_sit));
1224
1225	/* check boundary of a given segment number */
1226	if (segno > end_segno)
1227		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
1228
1229	/* check bitmap with valid block count */
1230	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1231		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
1232
1233	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
1234		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
1235				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
1236
1237	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
1238		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
1239				segno, GET_SIT_TYPE(raw_sit));
1240}
1241
1242void seg_info_from_raw_sit(struct seg_entry *se,
1243		struct f2fs_sit_entry *raw_sit)
1244{
1245	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1246	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1247	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1248	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1249	se->type = GET_SIT_TYPE(raw_sit);
1250	se->orig_type = GET_SIT_TYPE(raw_sit);
1251	se->mtime = le64_to_cpu(raw_sit->mtime);
1252}
1253
1254struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
1255		unsigned int segno)
1256{
1257	struct sit_info *sit_i = SIT_I(sbi);
1258	return &sit_i->sentries[segno];
1259}
1260
/*
 * Return the summary block covering @segno and report its kind via
 * @ret_type.
 *
 * If @segno is one of the six current segments, the cached curseg
 * summary is returned and the caller must NOT free it; *ret_type is
 * SEG_TYPE_CUR_NODE / SEG_TYPE_CUR_DATA, negated when the cached
 * footer contradicts the checkpoint's node-vs-data expectation.
 * Otherwise the on-disk SSA block is read into freshly allocated
 * memory that the caller must free; *ret_type becomes SEG_TYPE_NODE
 * or SEG_TYPE_DATA, or stays SEG_TYPE_MAX when the footer matches
 * neither kind.
 */
struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
				unsigned int segno, int *ret_type)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum_blk;
	struct curseg_info *curseg;
	int type, ret;
	u64 ssa_blk;

	/* default: unidentified, on-disk copy */
	*ret_type= SEG_TYPE_MAX;

	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
	/* is @segno one of the current node segments? */
	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
		if (segno == get_cp(cur_node_segno[type])) {
			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				/* footer disagrees with checkpoint: flag
				 * with a negated type */
				ASSERT_MSG("segno [0x%x] indicates a data "
						"segment, but should be node",
						segno);
				*ret_type = -SEG_TYPE_CUR_NODE;
			} else {
				*ret_type = SEG_TYPE_CUR_NODE;
			}
			return curseg->sum_blk;
		}
	}

	/* is @segno one of the current data segments? */
	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
		if (segno == get_cp(cur_data_segno[type])) {
			curseg = CURSEG_I(sbi, type);
			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a node "
						"segment, but should be data",
						segno);
				*ret_type = -SEG_TYPE_CUR_DATA;
			} else {
				*ret_type = SEG_TYPE_CUR_DATA;
			}
			return curseg->sum_blk;
		}
	}

	/* not a current segment: read the SSA copy (caller frees) */
	sum_blk = calloc(BLOCK_SZ, 1);
	ASSERT(sum_blk);

	ret = dev_read_block(sum_blk, ssa_blk);
	ASSERT(ret >= 0);

	if (IS_SUM_NODE_SEG(sum_blk->footer))
		*ret_type = SEG_TYPE_NODE;
	else if (IS_SUM_DATA_SEG(sum_blk->footer))
		*ret_type = SEG_TYPE_DATA;

	return sum_blk;
}
1316
1317int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
1318				struct f2fs_summary *sum_entry)
1319{
1320	struct f2fs_summary_block *sum_blk;
1321	u32 segno, offset;
1322	int type;
1323
1324	segno = GET_SEGNO(sbi, blk_addr);
1325	offset = OFFSET_IN_SEG(sbi, blk_addr);
1326
1327	sum_blk = get_sum_block(sbi, segno, &type);
1328	memcpy(sum_entry, &(sum_blk->entries[offset]),
1329				sizeof(struct f2fs_summary));
1330	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1331					type == SEG_TYPE_MAX)
1332		free(sum_blk);
1333	return type;
1334}
1335
1336static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
1337				struct f2fs_nat_entry *raw_nat)
1338{
1339	struct f2fs_nat_block *nat_block;
1340	pgoff_t block_addr;
1341	int entry_off;
1342	int ret;
1343
1344	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
1345		return;
1346
1347	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1348	ASSERT(nat_block);
1349
1350	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1351	block_addr = current_nat_addr(sbi, nid);
1352
1353	ret = dev_read_block(nat_block, block_addr);
1354	ASSERT(ret >= 0);
1355
1356	memcpy(raw_nat, &nat_block->entries[entry_off],
1357					sizeof(struct f2fs_nat_entry));
1358	free(nat_block);
1359}
1360
/*
 * Repoint one data block reference: in the node owning @nid (inode or
 * direct node), replace the address at @ofs_in_node with @newaddr and
 * write the node back.  If the old address fell inside the inode's
 * cached extent, invalidate the extent by zeroing its length.
 */
void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
				u16 ofs_in_node, block_t newaddr)
{
	struct f2fs_node *node_blk = NULL;
	struct node_info ni;
	block_t oldaddr, startaddr, endaddr;
	int ret;

	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
	ASSERT(node_blk);

	get_node_info(sbi, nid, &ni);

	/* read node_block */
	ret = dev_read_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	/* check its block address */
	if (node_blk->footer.nid == node_blk->footer.ino) {
		/* inline data lives in the inode: skip the extra_isize
		 * words before the address array */
		int ofs = get_extra_isize(node_blk);

		oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
		node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
	} else {
		/* direct node block: plain address array */
		oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
		node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
	}

	ret = dev_write_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	/* check extent cache entry */
	if (node_blk->footer.nid != node_blk->footer.ino) {
		/* we updated a direct node; fetch the owning inode to
		 * inspect its extent */
		get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);

		/* read inode block */
		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);
	}

	/* drop the cached extent if it covered the old address */
	startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
	endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
	if (oldaddr >= startaddr && oldaddr < endaddr) {
		node_blk->i.i_ext.len = 0;

		/* update inode block */
		ret = dev_write_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);
	}
	free(node_blk);
}
1412
1413void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
1414					nid_t nid, block_t newaddr)
1415{
1416	struct f2fs_nat_block *nat_block;
1417	pgoff_t block_addr;
1418	int entry_off;
1419	int ret;
1420
1421	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1422	ASSERT(nat_block);
1423
1424	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1425	block_addr = current_nat_addr(sbi, nid);
1426
1427	ret = dev_read_block(nat_block, block_addr);
1428	ASSERT(ret >= 0);
1429
1430	if (ino)
1431		nat_block->entries[entry_off].ino = cpu_to_le32(ino);
1432	nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
1433
1434	ret = dev_write_block(nat_block, block_addr);
1435	ASSERT(ret >= 0);
1436	free(nat_block);
1437}
1438
1439void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
1440{
1441	struct f2fs_nat_entry raw_nat;
1442	get_nat_entry(sbi, nid, &raw_nat);
1443	ni->nid = nid;
1444	node_info_from_raw_nat(ni, &raw_nat);
1445}
1446
1447void build_sit_entries(struct f2fs_sb_info *sbi)
1448{
1449	struct sit_info *sit_i = SIT_I(sbi);
1450	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1451	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1452	struct seg_entry *se;
1453	struct f2fs_sit_entry sit;
1454	unsigned int i, segno;
1455
1456	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1457		se = &sit_i->sentries[segno];
1458		struct f2fs_sit_block *sit_blk;
1459
1460		sit_blk = get_current_sit_page(sbi, segno);
1461		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1462		free(sit_blk);
1463
1464		check_block_count(sbi, segno, &sit);
1465		seg_info_from_raw_sit(se, &sit);
1466	}
1467
1468	for (i = 0; i < sits_in_cursum(journal); i++) {
1469		segno = le32_to_cpu(segno_in_journal(journal, i));
1470		se = &sit_i->sentries[segno];
1471		sit = sit_in_journal(journal, i);
1472
1473		check_block_count(sbi, segno, &sit);
1474		seg_info_from_raw_sit(se, &sit);
1475	}
1476
1477}
1478
1479int build_segment_manager(struct f2fs_sb_info *sbi)
1480{
1481	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1482	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1483	struct f2fs_sm_info *sm_info;
1484
1485	sm_info = malloc(sizeof(struct f2fs_sm_info));
1486	if (!sm_info)
1487		return -ENOMEM;
1488
1489	/* init sm info */
1490	sbi->sm_info = sm_info;
1491	sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
1492	sm_info->main_blkaddr = get_sb(main_blkaddr);
1493	sm_info->segment_count = get_sb(segment_count);
1494	sm_info->reserved_segments = get_cp(rsvd_segment_count);
1495	sm_info->ovp_segments = get_cp(overprov_segment_count);
1496	sm_info->main_segments = get_sb(segment_count_main);
1497	sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
1498
1499	build_sit_info(sbi);
1500
1501	build_curseg(sbi);
1502
1503	build_sit_entries(sbi);
1504
1505	return 0;
1506}
1507
1508void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
1509{
1510	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1511	struct f2fs_sm_info *sm_i = SM_I(sbi);
1512	unsigned int segno = 0;
1513	char *ptr = NULL;
1514	u32 sum_vblocks = 0;
1515	u32 free_segs = 0;
1516	struct seg_entry *se;
1517
1518	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
1519	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
1520	ASSERT(fsck->sit_area_bitmap);
1521	ptr = fsck->sit_area_bitmap;
1522
1523	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
1524
1525	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1526		se = get_seg_entry(sbi, segno);
1527
1528		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1529		ptr += SIT_VBLOCK_MAP_SIZE;
1530
1531		if (se->valid_blocks == 0x0) {
1532			if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
1533				le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
1534				le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
1535				le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
1536				le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
1537				le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
1538				continue;
1539			} else {
1540				free_segs++;
1541			}
1542		} else {
1543			sum_vblocks += se->valid_blocks;
1544		}
1545	}
1546	fsck->chk.sit_valid_blocks = sum_vblocks;
1547	fsck->chk.sit_free_segs = free_segs;
1548
1549	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
1550			sum_vblocks, sum_vblocks,
1551			free_segs, free_segs);
1552}
1553
1554void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
1555{
1556	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1557	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1558	struct sit_info *sit_i = SIT_I(sbi);
1559	unsigned int segno = 0;
1560	struct f2fs_summary_block *sum = curseg->sum_blk;
1561	char *ptr = NULL;
1562
1563	/* remove sit journal */
1564	sum->journal.n_sits = 0;
1565
1566	ptr = fsck->main_area_bitmap;
1567
1568	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1569		struct f2fs_sit_block *sit_blk;
1570		struct f2fs_sit_entry *sit;
1571		struct seg_entry *se;
1572		u16 valid_blocks = 0;
1573		u16 type;
1574		int i;
1575
1576		sit_blk = get_current_sit_page(sbi, segno);
1577		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1578		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
1579
1580		/* update valid block count */
1581		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1582			valid_blocks += get_bits_in_byte(sit->valid_map[i]);
1583
1584		se = get_seg_entry(sbi, segno);
1585		memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
1586		se->valid_blocks = valid_blocks;
1587		type = se->type;
1588		if (type >= NO_CHECK_TYPE) {
1589			ASSERT_MSG("Invalide type and valid blocks=%x,%x",
1590					segno, valid_blocks);
1591			type = 0;
1592		}
1593		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
1594								valid_blocks);
1595		rewrite_current_sit_page(sbi, segno, sit_blk);
1596		free(sit_blk);
1597
1598		ptr += SIT_VBLOCK_MAP_SIZE;
1599	}
1600}
1601
1602static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
1603{
1604	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1605	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1606	struct sit_info *sit_i = SIT_I(sbi);
1607	unsigned int segno;
1608	int i;
1609
1610	for (i = 0; i < sits_in_cursum(journal); i++) {
1611		struct f2fs_sit_block *sit_blk;
1612		struct f2fs_sit_entry *sit;
1613		struct seg_entry *se;
1614
1615		segno = segno_in_journal(journal, i);
1616		se = get_seg_entry(sbi, segno);
1617
1618		sit_blk = get_current_sit_page(sbi, segno);
1619		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1620
1621		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1622		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1623							se->valid_blocks);
1624		sit->mtime = cpu_to_le64(se->mtime);
1625
1626		rewrite_current_sit_page(sbi, segno, sit_blk);
1627		free(sit_blk);
1628	}
1629
1630	journal->n_sits = 0;
1631	return i;
1632}
1633
1634static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
1635{
1636	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1637	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1638	struct f2fs_nat_block *nat_block;
1639	pgoff_t block_addr;
1640	int entry_off;
1641	nid_t nid;
1642	int ret;
1643	int i = 0;
1644
1645	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1646	ASSERT(nat_block);
1647next:
1648	if (i >= nats_in_cursum(journal)) {
1649		free(nat_block);
1650		journal->n_nats = 0;
1651		return i;
1652	}
1653
1654	nid = le32_to_cpu(nid_in_journal(journal, i));
1655
1656	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1657	block_addr = current_nat_addr(sbi, nid);
1658
1659	ret = dev_read_block(nat_block, block_addr);
1660	ASSERT(ret >= 0);
1661
1662	memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
1663					sizeof(struct f2fs_nat_entry));
1664
1665	ret = dev_write_block(nat_block, block_addr);
1666	ASSERT(ret >= 0);
1667	i++;
1668	goto next;
1669}
1670
/* Flush NAT and SIT journals; write a checkpoint if anything changed. */
void flush_journal_entries(struct f2fs_sb_info *sbi)
{
	int flushed = flush_nat_journal_entries(sbi);

	flushed += flush_sit_journal_entries(sbi);

	if (flushed)
		write_checkpoint(sbi);
}
1679
1680void flush_sit_entries(struct f2fs_sb_info *sbi)
1681{
1682	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1683	struct sit_info *sit_i = SIT_I(sbi);
1684	unsigned int segno = 0;
1685	u32 free_segs = 0;
1686
1687	/* update free segments */
1688	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1689		struct f2fs_sit_block *sit_blk;
1690		struct f2fs_sit_entry *sit;
1691		struct seg_entry *se;
1692
1693		se = get_seg_entry(sbi, segno);
1694
1695		if (!se->dirty)
1696			continue;
1697
1698		sit_blk = get_current_sit_page(sbi, segno);
1699		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1700		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1701		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1702							se->valid_blocks);
1703		rewrite_current_sit_page(sbi, segno, sit_blk);
1704		free(sit_blk);
1705
1706		if (se->valid_blocks == 0x0 &&
1707				!IS_CUR_SEGNO(sbi, segno, NO_CHECK_TYPE))
1708			free_segs++;
1709	}
1710
1711	set_cp(free_segment_count, free_segs);
1712}
1713
/*
 * Scan from *@to (going left or right per @left) for a free block
 * usable by segment type @type, updating *@to in place.  Returns 0
 * when *@to lands on a usable block, -1 when the scan runs off the
 * main area.  When free segments are scarce, whole empty segments are
 * skipped so they stay available, and a fully empty section is
 * accepted as-is.
 */
int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left, int type)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct seg_entry *se;
	u32 segno;
	u64 offset;
	int not_enough = 0;
	u64 end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

	/* low on free segments: avoid consuming empty ones */
	if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
		not_enough = 1;

	while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
		segno = GET_SEGNO(sbi, *to);
		offset = OFFSET_IN_SEG(sbi, *to);

		se = get_seg_entry(sbi, segno);

		/* full segments and open cursegs are skipped wholesale */
		if (se->valid_blocks == sbi->blocks_per_seg ||
				IS_CUR_SEGNO(sbi, segno, type)) {
			*to = left ? START_BLOCK(sbi, segno) - 1:
						START_BLOCK(sbi, segno + 1);
			continue;
		}

		/* preserve empty segments when space is tight */
		if (se->valid_blocks == 0 && not_enough) {
			*to = left ? START_BLOCK(sbi, segno) - 1:
						START_BLOCK(sbi, segno + 1);
			continue;
		}

		/* at a section boundary: if the whole section is empty,
		 * the current position is acceptable as-is */
		if (se->valid_blocks == 0 && !(segno % sbi->segs_per_sec)) {
			struct seg_entry *se2;
			unsigned int i;

			for (i = 1; i < sbi->segs_per_sec; i++) {
				se2 = get_seg_entry(sbi, segno + i);
				if (se2->valid_blocks)
					break;
			}
			if (i == sbi->segs_per_sec)
				return 0;
		}

		/* matching segment type and an unused block: done */
		if (se->type == type &&
			!f2fs_test_bit(offset, (const char *)se->cur_valid_map))
			return 0;

		*to = left ? *to - 1: *to + 1;
	}
	return -1;
}
1767
/*
 * Relocate every current segment to a free block at or after @from:
 * persist each curseg's summary to its old SSA slot, pick a new
 * position via find_next_free_block(), and adopt the summary entries
 * already stored at the new location (SSR allocation).
 */
void move_curseg_info(struct f2fs_sb_info *sbi, u64 from)
{
	int i, ret;

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct f2fs_summary_block buf;
		u32 old_segno;
		u64 ssa_blk, to;

		/* update original SSA too */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_write_block(curseg->sum_blk, ssa_blk);
		ASSERT(ret >= 0);

		/* locate the next free block of this curseg's type */
		to = from;
		ret = find_next_free_block(sbi, &to, 0, i);
		ASSERT(ret == 0);

		old_segno = curseg->segno;
		curseg->segno = GET_SEGNO(sbi, to);
		curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
		curseg->alloc_type = SSR;

		/* update new segno */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_read_block(&buf, ssa_blk);
		ASSERT(ret >= 0);

		/* take over the entries (not the footer/journal) of the
		 * summary already on disk at the new segment */
		memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);

		/* update se->types */
		reset_curseg(sbi, i);

		DBG(1, "Move curseg[%d] %x -> %x after %"PRIx64"\n",
				i, old_segno, curseg->segno, from);
	}
}
1807
1808void zero_journal_entries(struct f2fs_sb_info *sbi)
1809{
1810	int i;
1811
1812	for (i = 0; i < NO_CHECK_TYPE; i++)
1813		CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
1814}
1815
1816void write_curseg_info(struct f2fs_sb_info *sbi)
1817{
1818	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1819	int i;
1820
1821	for (i = 0; i < NO_CHECK_TYPE; i++) {
1822		cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
1823		if (i < CURSEG_HOT_NODE) {
1824			set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
1825			set_cp(cur_data_blkoff[i],
1826					CURSEG_I(sbi, i)->next_blkoff);
1827		} else {
1828			int n = i - CURSEG_HOT_NODE;
1829
1830			set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
1831			set_cp(cur_node_blkoff[n],
1832					CURSEG_I(sbi, i)->next_blkoff);
1833		}
1834	}
1835}
1836
1837int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
1838					struct f2fs_nat_entry *raw_nat)
1839{
1840	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1841	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1842	int i = 0;
1843
1844	for (i = 0; i < nats_in_cursum(journal); i++) {
1845		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1846			memcpy(raw_nat, &nat_in_journal(journal, i),
1847						sizeof(struct f2fs_nat_entry));
1848			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
1849			return i;
1850		}
1851	}
1852	return -1;
1853}
1854
1855void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
1856{
1857	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1858	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1859	struct f2fs_nat_block *nat_block;
1860	pgoff_t block_addr;
1861	int entry_off;
1862	int ret;
1863	int i = 0;
1864
1865	/* check in journal */
1866	for (i = 0; i < nats_in_cursum(journal); i++) {
1867		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1868			memset(&nat_in_journal(journal, i), 0,
1869					sizeof(struct f2fs_nat_entry));
1870			FIX_MSG("Remove nid [0x%x] in nat journal\n", nid);
1871			return;
1872		}
1873	}
1874	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1875	ASSERT(nat_block);
1876
1877	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1878	block_addr = current_nat_addr(sbi, nid);
1879
1880	ret = dev_read_block(nat_block, block_addr);
1881	ASSERT(ret >= 0);
1882
1883	memset(&nat_block->entries[entry_off], 0,
1884					sizeof(struct f2fs_nat_entry));
1885
1886	ret = dev_write_block(nat_block, block_addr);
1887	ASSERT(ret >= 0);
1888	free(nat_block);
1889}
1890
/*
 * Write a full checkpoint pack to the slot of the currently valid CP:
 * head CP block, (skipped) payload and orphan area, the six curseg
 * summary blocks, optional NAT bits, and finally the trailing CP
 * block after an fsync barrier so a power cut cannot leave a
 * half-written pack that still validates.
 */
void write_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	block_t orphan_blks = 0;
	unsigned long long cp_blk_no;
	u32 flags = CP_UMOUNT_FLAG;
	int i, ret;
	u_int32_t crc = 0;

	/* keep the existing orphan area, if any, inside the pack */
	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
		orphan_blks = __start_sum_addr(sbi) - 1;
		flags |= CP_ORPHAN_PRESENT_FLAG;
	}

	set_cp(free_segment_count, get_free_segments(sbi));
	set_cp(valid_block_count, sbi->total_valid_block_count);
	/* 8 = head CP + 6 summaries + tail CP */
	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));

	flags = update_nat_bits_flags(sb, cp, flags);
	set_cp(ckpt_flags, flags);

	/* seal the CP block with its CRC */
	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) = cpu_to_le32(crc);

	/* pick the CP pack slot matching the active checkpoint */
	cp_blk_no = get_sb(cp_blkaddr);
	if (sbi->cur_cp == 2)
		cp_blk_no += 1 << get_sb(log_blocks_per_seg);

	/* write the first cp */
	ret = dev_write_block(cp, cp_blk_no++);
	ASSERT(ret >= 0);

	/* skip payload */
	cp_blk_no += get_sb(cp_payload);
	/* skip orphan blocks */
	cp_blk_no += orphan_blks;

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		u64 ssa_blk;

		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
		ASSERT(ret >= 0);

		/* update original SSA too */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_write_block(curseg->sum_blk, ssa_blk);
		ASSERT(ret >= 0);
	}

	/* Write nat bits */
	if (flags & CP_NAT_BITS_FLAG)
		write_nat_bits(sbi, sb, cp, sbi->cur_cp);

	/* in case of sudden power off */
	f2fs_fsync_device();

	/* write the last cp */
	ret = dev_write_block(cp, cp_blk_no++);
	ASSERT(ret >= 0);
}
1954
/*
 * Build fsck's view of the NAT: scan every NAT block of the currently
 * valid copy, record each live entry (blk_addr != 0) in
 * fsck->nat_area_bitmap and fsck->entries, and count valid entries
 * and apparent inodes.  Entries present in the NAT journal then
 * supersede the on-disk ones.  The node/meta reserved inodes are
 * repaired in place if their block address is not the expected 0x1.
 */
void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = &curseg->sum_blk->journal;
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	struct node_info ni;
	u32 nid, nr_nat_blks;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;
	int ret;
	unsigned int i;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
	ASSERT(nat_block);

	/* Alloc & build nat entry bitmap */
	/* the NAT area is mirrored, so only half its segments hold blocks */
	nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
					sbi->log_blocks_per_seg;

	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
	ASSERT(fsck->nat_area_bitmap);

	fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
					fsck->nr_nat_entries);
	ASSERT(fsck->entries);

	for (block_off = 0; block_off < nr_nat_blks; block_off++) {

		/* map the logical NAT block to its interleaved disk address */
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* bit set => the valid copy lives in the mirror segment */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		nid = block_off * NAT_ENTRY_PER_BLOCK;
		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
			ni.nid = nid + i;

			if ((nid + i) == F2FS_NODE_INO(sbi) ||
					(nid + i) == F2FS_META_INO(sbi)) {
				/* block_addr of node/meta inode should be 0x1 */
				if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
					FIX_MSG("ino: 0x%x node/meta inode, block_addr= 0x%x -> 0x1",
							nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
					nat_block->entries[i].block_addr = cpu_to_le32(0x1);
					ret = dev_write_block(nat_block, block_addr);
					ASSERT(ret >= 0);
				}
				continue;
			}

			node_info_from_raw_nat(&ni, &nat_block->entries[i]);
			/* unallocated entry */
			if (ni.blk_addr == 0x0)
				continue;
			if (ni.ino == 0x0) {
				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
					" is invalid\n", ni.ino, ni.blk_addr);
			}
			/* self-owned entry: looks like an inode */
			if (ni.ino == (nid + i)) {
				fsck->nat_valid_inode_cnt++;
				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
			}
			if (nid + i == 0) {
				/*
				 * nat entry [0] must be null.  If
				 * it is corrupted, set its bit in
				 * nat_area_bitmap, fsck_verify will
				 * nullify it
				 */
				ASSERT_MSG("Invalid nat entry[0]: "
					"blk_addr[0x%x]\n", ni.blk_addr);
				fsck->chk.valid_nat_entry_cnt--;
			}

			DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
				nid + i, ni.blk_addr, ni.ino);
			f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
			fsck->chk.valid_nat_entry_cnt++;

			fsck->entries[nid + i] = nat_block->entries[i];
		}
	}

	/* Traverse nat journal, update the corresponding entries */
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct f2fs_nat_entry raw_nat;
		nid = le32_to_cpu(nid_in_journal(journal, i));
		ni.nid = nid;

		DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);

		/* Clear the original bit and count */
		if (fsck->entries[nid].block_addr != 0x0) {
			fsck->chk.valid_nat_entry_cnt--;
			f2fs_clear_bit(nid, fsck->nat_area_bitmap);
			if (fsck->entries[nid].ino == nid)
				fsck->nat_valid_inode_cnt--;
		}

		/* Use nat entries in journal */
		memcpy(&raw_nat, &nat_in_journal(journal, i),
					sizeof(struct f2fs_nat_entry));
		node_info_from_raw_nat(&ni, &raw_nat);
		if (ni.blk_addr != 0x0) {
			if (ni.ino == 0x0)
				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
					" is invalid\n", ni.ino, ni.blk_addr);
			if (ni.ino == nid) {
				fsck->nat_valid_inode_cnt++;
				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
			}
			f2fs_set_bit(nid, fsck->nat_area_bitmap);
			fsck->chk.valid_nat_entry_cnt++;
			DBG(3, "nid[0x%x] in nat cache\n", nid);
		}
		fsck->entries[nid] = raw_nat;
	}
	free(nat_block);

	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
			fsck->chk.valid_nat_entry_cnt,
			fsck->chk.valid_nat_entry_cnt);
}
2089
2090static int check_sector_size(struct f2fs_super_block *sb)
2091{
2092	int index;
2093	u_int32_t log_sectorsize, log_sectors_per_block;
2094	u_int8_t *zero_buff;
2095
2096	log_sectorsize = log_base_2(c.sector_size);
2097	log_sectors_per_block = log_base_2(c.sectors_per_blk);
2098
2099	if (log_sectorsize == get_sb(log_sectorsize) &&
2100			log_sectors_per_block == get_sb(log_sectors_per_block))
2101		return 0;
2102
2103	zero_buff = calloc(F2FS_BLKSIZE, 1);
2104	ASSERT(zero_buff);
2105
2106	set_sb(log_sectorsize, log_sectorsize);
2107	set_sb(log_sectors_per_block, log_sectors_per_block);
2108
2109	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
2110	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
2111	for (index = 0; index < 2; index++) {
2112		if (dev_write(zero_buff, index * F2FS_BLKSIZE, F2FS_BLKSIZE)) {
2113			MSG(1, "\tError: Failed while writing supe_blk "
2114				"on disk!!! index : %d\n", index);
2115			free(zero_buff);
2116			return -1;
2117		}
2118	}
2119
2120	free(zero_buff);
2121	return 0;
2122}
2123
2124int f2fs_do_mount(struct f2fs_sb_info *sbi)
2125{
2126	struct f2fs_checkpoint *cp = NULL;
2127	struct f2fs_super_block *sb = NULL;
2128	int ret;
2129
2130	sbi->active_logs = NR_CURSEG_TYPE;
2131	ret = validate_super_block(sbi, 0);
2132	if (ret) {
2133		ret = validate_super_block(sbi, 1);
2134		if (ret)
2135			return -1;
2136	}
2137	sb = F2FS_RAW_SUPER(sbi);
2138
2139	ret = check_sector_size(sb);
2140	if (ret)
2141		return -1;
2142
2143	print_raw_sb_info(sb);
2144
2145	init_sb_info(sbi);
2146
2147	ret = get_valid_checkpoint(sbi);
2148	if (ret) {
2149		ERR_MSG("Can't find valid checkpoint\n");
2150		return -1;
2151	}
2152
2153	if (sanity_check_ckpt(sbi)) {
2154		ERR_MSG("Checkpoint is polluted\n");
2155		return -1;
2156	}
2157	cp = F2FS_CKPT(sbi);
2158
2159	print_ckpt_info(sbi);
2160
2161	if (c.auto_fix || c.preen_mode) {
2162		u32 flag = get_cp(ckpt_flags);
2163
2164		if (flag & CP_FSCK_FLAG ||
2165			(exist_qf_ino(sb) && (!(flag & CP_UMOUNT_FLAG) ||
2166						flag & CP_ERROR_FLAG))) {
2167			c.fix_on = 1;
2168		} else if (!c.preen_mode) {
2169			print_cp_state(flag);
2170			return 1;
2171		}
2172	}
2173
2174	c.bug_on = 0;
2175	c.feature = sb->feature;
2176
2177	/* precompute checksum seed for metadata */
2178	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
2179		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
2180
2181	sbi->total_valid_node_count = get_cp(valid_node_count);
2182	sbi->total_valid_inode_count = get_cp(valid_inode_count);
2183	sbi->user_block_count = get_cp(user_block_count);
2184	sbi->total_valid_block_count = get_cp(valid_block_count);
2185	sbi->last_valid_block_count = sbi->total_valid_block_count;
2186	sbi->alloc_valid_block_count = 0;
2187
2188	if (build_segment_manager(sbi)) {
2189		ERR_MSG("build_segment_manager failed\n");
2190		return -1;
2191	}
2192
2193	if (build_node_manager(sbi)) {
2194		ERR_MSG("build_node_manager failed\n");
2195		return -1;
2196	}
2197
2198	/* Check nat_bits */
2199	if (is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
2200		u_int32_t nat_bits_bytes, nat_bits_blocks;
2201		__le64 *kaddr;
2202		u_int32_t blk;
2203
2204		blk = get_sb(cp_blkaddr) + (1 << get_sb(log_blocks_per_seg));
2205		if (sbi->cur_cp == 2)
2206			blk += 1 << get_sb(log_blocks_per_seg);
2207
2208		nat_bits_bytes = get_sb(segment_count_nat) << 5;
2209		nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
2210				F2FS_BLKSIZE - 1);
2211		blk -= nat_bits_blocks;
2212
2213		kaddr = malloc(PAGE_SIZE);
2214		ret = dev_read_block(kaddr, blk);
2215		ASSERT(ret >= 0);
2216		if (*kaddr != get_cp_crc(cp))
2217			write_nat_bits(sbi, sb, cp, sbi->cur_cp);
2218		else
2219			MSG(0, "Info: Found valid nat_bits in checkpoint\n");
2220		free(kaddr);
2221	}
2222	return 0;
2223}
2224
2225void f2fs_do_umount(struct f2fs_sb_info *sbi)
2226{
2227	struct sit_info *sit_i = SIT_I(sbi);
2228	struct f2fs_sm_info *sm_i = SM_I(sbi);
2229	struct f2fs_nm_info *nm_i = NM_I(sbi);
2230	unsigned int i;
2231
2232	/* free nm_info */
2233	if (c.func == SLOAD || c.func == FSCK)
2234		free(nm_i->nid_bitmap);
2235	free(nm_i->nat_bitmap);
2236	free(sbi->nm_info);
2237
2238	/* free sit_info */
2239	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
2240		free(sit_i->sentries[i].cur_valid_map);
2241		free(sit_i->sentries[i].ckpt_valid_map);
2242	}
2243	free(sit_i->sit_bitmap);
2244	free(sm_i->sit_info);
2245
2246	/* free sm_info */
2247	for (i = 0; i < NR_CURSEG_TYPE; i++)
2248		free(sm_i->curseg_array[i].sum_blk);
2249
2250	free(sm_i->curseg_array);
2251	free(sbi->sm_info);
2252
2253	free(sbi->ckpt);
2254	free(sbi->raw_super);
2255}
2256