/**
 * mount.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

void print_inode_info(struct f2fs_inode *inode)
{
	unsigned int i = 0;
	int namelen = le32_to_cpu(inode->i_namelen);

	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);

	if (namelen) {
		DISP_u32(inode, i_namelen);
		inode->i_name[namelen] = '\0';
		DISP_utf(inode, i_name);
	}

	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			inode->i_ext.fofs,
			inode->i_ext.blk_addr,
			inode->i_ext.len);

	DISP_u32(inode, i_addr[0]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[1]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[2]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[3]);	/* Pointers to data blocks */

	for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
		if (inode->i_addr[i] != 0x0) {
			printf("i_addr[0x%x] points data block\r\t\t[0x%4x]\n",
					i, inode->i_addr[i]);
			break;
		}
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	printf("\n");
}

void print_node_info(struct f2fs_node *node_block)
{
	nid_t ino = le32_to_cpu(node_block->footer.ino);
	nid_t nid = le32_to_cpu(node_block->footer.nid);

	/* Is this an inode? */
	if (ino == nid) {
		DBG(0, "Node ID [0x%x:%u] is inode\n", nid, nid);
		print_inode_info(&node_block->i);
	} else {
		int i;
		u32 *dump_blk = (u32 *)node_block;
		DBG(0, "Node ID [0x%x:%u] is direct node or indirect node.\n",
								nid, nid);
		for (i = 0; i <= 10; i++)
			MSG(0, "[%d]\t\t\t[0x%8x : %d]\n",
						i, dump_blk[i], dump_blk[i]);
	}
}

void print_raw_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);

	if (!config.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);
	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	printf("\n");
}

void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (!config.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);

	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}

int sanity_check_raw_super(struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		return -1;
	}

	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		return -1;
	}

	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (F2FS_BLKSIZE != blocksize) {
		return -1;
	}

	if (F2FS_LOG_SECTOR_SIZE != le32_to_cpu(raw_super->log_sectorsize)) {
		return -1;
	}

	if (F2FS_LOG_SECTORS_PER_BLOCK !=
				le32_to_cpu(raw_super->log_sectors_per_block)) {
		return -1;
	}

	return 0;
}

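/*
 * F2FS keeps two copies of the superblock: the primary one at
 * F2FS_SUPER_OFFSET inside block #0 and a backup at the same offset
 * inside block #1.  The caller tries block 0 first and falls back to
 * block 1 (see f2fs_do_mount() below).
 */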
int validate_super_block(struct f2fs_sb_info *sbi, int block)
{
	u64 offset;

	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
	if (!sbi->raw_super)
		return -ENOMEM;

	if (block == 0)
		offset = F2FS_SUPER_OFFSET;
	else
		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;

	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
		return -1;

	if (!sanity_check_raw_super(sbi->raw_super))
		return 0;

	free(sbi->raw_super);
	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);

	return -EINVAL;
}

int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
		* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;
	return 0;
}

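/*
 * A checkpoint pack is considered valid only if the first and the last
 * block of the pack both carry a correct CRC and record the same
 * checkpoint version.  On success the first cp block is returned to the
 * caller (who owns and must free it); otherwise NULL is returned.
 */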
void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
				unsigned long long *version)
{
	void *cp_page_1, *cp_page_2;
	struct f2fs_checkpoint *cp_block;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cur_version = 0, pre_version = 0;
	unsigned int crc = 0;
	size_t crc_offset;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = malloc(PAGE_SIZE);
	if (dev_read_block(cp_page_1, cp_addr) < 0)
		goto invalid_cp1;

	cp_block = (struct f2fs_checkpoint *)cp_page_1;
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
	if (f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = le64_to_cpu(cp_block->checkpoint_ver);

	/* Read the 2nd cp block in this CP pack */
	cp_page_2 = malloc(PAGE_SIZE);
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;

	if (dev_read_block(cp_page_2, cp_addr) < 0)
		goto invalid_cp2;

	cp_block = (struct f2fs_checkpoint *)cp_page_2;
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
	if (f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = le64_to_cpu(cp_block->checkpoint_ver);

	if (cur_version == pre_version) {
		*version = cur_version;
		free(cp_page_2);
		return cp_page_1;
	}

invalid_cp2:
	free(cp_page_2);
invalid_cp1:
	free(cp_page_1);
	return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_sb = sbi->raw_super;
	void *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	int ret;

	sbi->ckpt = malloc(cp_blks * blk_size);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2).
	 */
	cp_start_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += 1 << le32_to_cpu(raw_sb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version)) {
			cur_page = cp2;
			sbi->cur_cp = 2;
		} else {
			cur_page = cp1;
			sbi->cur_cp = 1;
		}
	} else if (cp1) {
		cur_page = cp1;
		sbi->cur_cp = 1;
	} else if (cp2) {
		cur_page = cp2;
		sbi->cur_cp = 2;
	} else {
		free(cp1);
		free(cp2);
		goto fail_no_cp;
	}

	memcpy(sbi->ckpt, cur_page, blk_size);

	if (cp_blks > 1) {
		unsigned int i;
		unsigned long long cp_blk_no;

		cp_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
		if (cur_page == cp2)
			cp_blk_no += 1 <<
				le32_to_cpu(raw_sb->log_blocks_per_seg);
		/* copy sit bitmap */
		for (i = 1; i < cp_blks; i++) {
			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
			ret = dev_read_block(cur_page, cp_blk_no + i);
			ASSERT(ret >= 0);
			memcpy(ckpt + i * blk_size, cur_page, blk_size);
		}
	}
	free(cp1);
	free(cp2);
	return 0;

fail_no_cp:
	free(sbi->ckpt);
	return -EINVAL;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	return 0;
}

int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	return 0;
}

int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	sit_i = malloc(sizeof(struct sit_info));
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
	if (!sit_i->sentries)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
		sit_i->sentries[start].ckpt_valid_map
			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	sit_segs = le32_to_cpu(raw_sb->segment_count_sit) >> 1;
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = malloc(bitmap_size);
	if (!dst_bitmap)
		return -ENOMEM;
	memcpy(dst_bitmap, src_bitmap, bitmap_size);

	sit_i->sit_base_addr = le32_to_cpu(raw_sb->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(ckpt->elapsed_time);
	return 0;
}

void reset_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	struct seg_entry *se;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	se = get_seg_entry(sbi, curseg->segno);
	se->type = type;
}

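/*
 * Compacted summaries pack the NAT journal, the SIT journal and the
 * per-block summary entries of the three data logs back to back,
 * starting at start_sum_block() and spilling into the following blocks
 * whenever a block boundary (minus the footer area) is reached.
 */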
static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int i, j, offset;
	block_t start;
	char *kaddr;
	int ret;

	start = start_sum_block(sbi);

	kaddr = (char *)malloc(PAGE_SIZE);
	ret = dev_read_block(kaddr, start++);
	ASSERT(ret >= 0);

	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&curseg->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&curseg->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);

	offset = 2 * SUM_JOURNAL_SIZE;
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		reset_curseg(sbi, i);

		if (curseg->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;
		else
			blk_off = curseg->next_blkoff;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			curseg->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <=
					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
				continue;
			memset(kaddr, 0, PAGE_SIZE);
			ret = dev_read_block(kaddr, start++);
			ASSERT(ret >= 0);
			offset = 0;
		}
	}
	free(kaddr);
}

static void restore_node_summary(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_summary_block *sum_blk)
{
	struct f2fs_node *node_blk;
	struct f2fs_summary *sum_entry;
	block_t addr;
	unsigned int i;
	int ret;

	node_blk = malloc(F2FS_BLKSIZE);
	ASSERT(node_blk);

	/* scan the node segment */
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum_blk->entries[0];

	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
		ret = dev_read_block(node_blk, addr);
		ASSERT(ret >= 0);
		sum_entry->nid = node_blk->footer.nid;
		addr++;
	}
	free(node_blk);
}

static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum_blk;
	struct curseg_info *curseg;
	unsigned int segno = 0;
	block_t blk_addr = 0;
	int ret;

	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLKADDR(sbi, segno);
	}

	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
	ret = dev_read_block(sum_blk, blk_addr);
	ASSERT(ret >= 0);

	if (IS_NODESEG(type) && !is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
		restore_node_summary(sbi, segno, sum_blk);

	curseg = CURSEG_I(sbi, type);
	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
	reset_curseg(sbi, type);
	free(sum_blk);
}

static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		read_compacted_summaries(sbi);
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		read_normal_summaries(sbi, type);
}

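/*
 * Rebuild the NR_CURSEG_TYPE active logs (hot/warm/cold data and node):
 * current segment number, next block offset and allocation type come
 * straight from the checkpoint, and the summary blocks are then filled
 * in by restore_curseg_summaries().
 */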
static void build_curseg(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *array;
	unsigned short blk_off;
	unsigned int segno;
	int i;

	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
	ASSERT(array);

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
		ASSERT(array[i].sum_blk);
		if (i <= CURSEG_COLD_DATA) {
			blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
			segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		}
		if (i > CURSEG_COLD_DATA) {
			blk_off = le16_to_cpu(ckpt->cur_node_blkoff[i -
							CURSEG_HOT_NODE]);
			segno = le32_to_cpu(ckpt->cur_node_segno[i -
							CURSEG_HOT_NODE]);
		}
		array[i].segno = segno;
		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
		array[i].next_segno = NULL_SEGNO;
		array[i].next_blkoff = blk_off;
		array[i].alloc_type = ckpt->alloc_type[i];
	}
	restore_curseg_summaries(sbi);
}

inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
	ASSERT(segno <= end_segno);
}

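/*
 * The SIT area stores two copies of every SIT block; the bit for this
 * block in sit_bitmap selects which copy is currently valid.  The
 * returned block is calloc()'ed and must be freed by the caller.
 */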
static struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;
	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
	int ret;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	ret = dev_read_block(sit_blk, blk_addr);
	ASSERT(ret >= 0);

	return sit_blk;
}

void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_sit_block *sit_blk)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;
	int ret;

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	ret = dev_write_block(sit_blk, blk_addr);
	ASSERT(ret >= 0);
}

void check_block_count(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	int valid_blocks = 0;
	unsigned int i;

	/* check segment usage */
	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
				segno, GET_SIT_VBLOCKS(raw_sit));

	/* check boundary of a given segment number */
	if (segno > end_segno)
		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);

	/* check bitmap with valid block count */
	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);

	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);

	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
				segno, GET_SIT_TYPE(raw_sit));
}

void seg_info_from_raw_sit(struct seg_entry *se,
		struct f2fs_sit_entry *raw_sit)
{
	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(raw_sit);
	se->mtime = le64_to_cpu(raw_sit->mtime);
}

struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

int get_sum_block(struct f2fs_sb_info *sbi, unsigned int segno,
				struct f2fs_summary_block *sum_blk)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *curseg;
	int type, ret;
	u64 ssa_blk;

	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
		if (segno == le32_to_cpu(ckpt->cur_node_segno[type])) {
			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a data "
						"segment, but should be node",
						segno);
				return -EINVAL;
			}
			memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
			return SEG_TYPE_CUR_NODE;
		}
	}

	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
		if (segno == le32_to_cpu(ckpt->cur_data_segno[type])) {
			curseg = CURSEG_I(sbi, type);
			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a node "
						"segment, but should be data",
						segno);
				return -EINVAL;
			}
			DBG(2, "segno [0x%x] is current data seg[0x%x]\n",
								segno, type);
			memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
			return SEG_TYPE_CUR_DATA;
		}
	}

	ret = dev_read_block(sum_blk, ssa_blk);
	ASSERT(ret >= 0);

	if (IS_SUM_NODE_SEG(sum_blk->footer))
		return SEG_TYPE_NODE;
	else
		return SEG_TYPE_DATA;
}

int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
				struct f2fs_summary *sum_entry)
{
	struct f2fs_summary_block *sum_blk;
	u32 segno, offset;
	int ret;

	segno = GET_SEGNO(sbi, blk_addr);
	offset = OFFSET_IN_SEG(sbi, blk_addr);

	sum_blk = calloc(BLOCK_SZ, 1);

	ret = get_sum_block(sbi, segno, sum_blk);
	memcpy(sum_entry, &(sum_blk->entries[offset]),
				sizeof(struct f2fs_summary));
	free(sum_blk);
	return ret;
}

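/*
 * Look up the on-disk NAT entry for @nid, preferring the copy cached in
 * the NAT journal of the hot data log.  Like the SIT area, the NAT area
 * stores each block twice (one segment apart within a segment pair);
 * the per-block bit in nat_bitmap picks the valid copy.  As a worked
 * example, assuming 512 blocks per segment (log_blocks_per_seg == 9),
 * NAT block #600 maps to seg_off 1, so its base copy sits at
 * nat_blkaddr + (1 << 10) + (600 & 511), plus blocks_per_seg if the
 * bitmap bit for block #600 is set.
 */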
static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
				struct f2fs_nat_entry *raw_nat)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off, entry_off;
	int ret;

	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
		return;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

	block_off = nid / NAT_ENTRY_PER_BLOCK;
	entry_off = nid % NAT_ENTRY_PER_BLOCK;

	seg_off = block_off >> sbi->log_blocks_per_seg;
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	ret = dev_read_block(nat_block, block_addr);
	ASSERT(ret >= 0);

	memcpy(raw_nat, &nat_block->entries[entry_off],
					sizeof(struct f2fs_nat_entry));
	free(nat_block);
}

void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nat_entry raw_nat;
	get_nat_entry(sbi, nid, &raw_nat);
	ni->nid = nid;
	node_info_from_raw_nat(ni, &raw_nat);
}

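/*
 * Populate the in-memory seg_entry array: for every segment, prefer a
 * SIT entry still sitting in the SIT journal of the cold data log and
 * fall back to the on-disk SIT block otherwise, sanity-checking the
 * entry before caching it.
 */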
void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int segno;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct seg_entry *se = &sit_i->sentries[segno];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		int i;

		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == segno) {
				sit = sit_in_journal(sum, i);
				goto got_it;
			}
		}
		sit_blk = get_current_sit_page(sbi, segno);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
		free(sit_blk);
got_it:
		check_block_count(sbi, segno, &sit);
		seg_info_from_raw_sit(se, &sit);
	}
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;

	sm_info = malloc(sizeof(struct f2fs_sm_info));
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);

	build_sit_info(sbi);

	build_curseg(sbi);

	build_sit_entries(sbi);

	return 0;
}

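/*
 * Snapshot the on-disk view of the SIT valid-block bitmaps into
 * fsck->sit_area_bitmap and account valid blocks / free segments, so
 * that it can later be compared against the bitmap fsck builds while
 * traversing the file system tree (fsck->main_area_bitmap).
 */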
void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	unsigned int segno = 0;
	char *ptr = NULL;
	u32 sum_vblocks = 0;
	u32 free_segs = 0;
	struct seg_entry *se;

	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
	ptr = fsck->sit_area_bitmap;

	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		se = get_seg_entry(sbi, segno);

		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
		ptr += SIT_VBLOCK_MAP_SIZE;

		if (se->valid_blocks == 0x0) {
			if (sbi->ckpt->cur_node_segno[0] == segno ||
					sbi->ckpt->cur_data_segno[0] == segno ||
					sbi->ckpt->cur_node_segno[1] == segno ||
					sbi->ckpt->cur_data_segno[1] == segno ||
					sbi->ckpt->cur_node_segno[2] == segno ||
					sbi->ckpt->cur_data_segno[2] == segno) {
				continue;
			} else {
				free_segs++;
			}
		} else {
			sum_vblocks += se->valid_blocks;
		}
	}
	fsck->chk.sit_valid_blocks = sum_vblocks;
	fsck->chk.sit_free_segs = free_segs;

	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
			sum_vblocks, sum_vblocks,
			free_segs, free_segs);
}

void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno = 0;
	struct f2fs_summary_block *sum = curseg->sum_blk;
	char *ptr = NULL;

	/* remove sit journal */
	sum->n_sits = 0;

	fsck->chk.free_segs = 0;

	ptr = fsck->main_area_bitmap;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry *sit;
		struct seg_entry *se;
		u16 valid_blocks = 0;
		u16 type;
		int i;

		sit_blk = get_current_sit_page(sbi, segno);
		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);

		/* update valid block count */
		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
			valid_blocks += get_bits_in_byte(sit->valid_map[i]);

		se = get_seg_entry(sbi, segno);
		type = se->type;
		if (type >= NO_CHECK_TYPE) {
			ASSERT(valid_blocks);
			type = 0;
		}
		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
								valid_blocks);
		rewrite_current_sit_page(sbi, segno, sit_blk);
		free(sit_blk);

		if (valid_blocks == 0 &&
				sbi->ckpt->cur_node_segno[0] != segno &&
				sbi->ckpt->cur_data_segno[0] != segno &&
				sbi->ckpt->cur_node_segno[1] != segno &&
				sbi->ckpt->cur_data_segno[1] != segno &&
				sbi->ckpt->cur_node_segno[2] != segno &&
				sbi->ckpt->cur_data_segno[2] != segno)
			fsck->chk.free_segs++;

		ptr += SIT_VBLOCK_MAP_SIZE;
	}
}

int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
					struct f2fs_nat_entry *raw_nat)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;

	for (i = 0; i < nats_in_cursum(sum); i++) {
		if (le32_to_cpu(nid_in_journal(sum, i)) == nid) {
			memcpy(raw_nat, &nat_in_journal(sum, i),
						sizeof(struct f2fs_nat_entry));
			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
			return i;
		}
	}
	return -1;
}

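/*
 * Zero out the NAT entry of @nid: if the entry is still in the NAT
 * journal it is cleared there, otherwise the containing NAT block is
 * read, patched and written back in place.
 */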
void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off, entry_off;
	int ret;
	int i = 0;

	/* check in journal */
	for (i = 0; i < nats_in_cursum(sum); i++) {
		if (le32_to_cpu(nid_in_journal(sum, i)) == nid) {
			memset(&nat_in_journal(sum, i), 0,
					sizeof(struct f2fs_nat_entry));
			FIX_MSG("Remove nid [0x%x] in nat journal\n", nid);
			return;
		}
	}
	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

	block_off = nid / NAT_ENTRY_PER_BLOCK;
	entry_off = nid % NAT_ENTRY_PER_BLOCK;

	seg_off = block_off >> sbi->log_blocks_per_seg;
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	ret = dev_read_block(nat_block, block_addr);
	ASSERT(ret >= 0);

	memset(&nat_block->entries[entry_off], 0,
					sizeof(struct f2fs_nat_entry));

	ret = dev_write_block(nat_block, block_addr);
	ASSERT(ret >= 0);
	free(nat_block);
}

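/*
 * Walk every NAT block and mark, in fsck->nat_area_bitmap, each nid
 * whose block address is non-zero (journalled entries take precedence
 * over the on-disk copy).  The reserved node and meta inodes are
 * skipped since they never appear in the main area.
 */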
void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	u32 nid, nr_nat_blks;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;
	int ret;
	unsigned int i;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
	ASSERT(nat_block);

	/* Alloc & build nat entry bitmap */
	nr_nat_blks = (le32_to_cpu(raw_sb->segment_count_nat) / 2) <<
						sbi->log_blocks_per_seg;

	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
	ASSERT(fsck->nat_area_bitmap != NULL);

	for (block_off = 0; block_off < nr_nat_blks; block_off++) {
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		nid = block_off * NAT_ENTRY_PER_BLOCK;
		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
			struct f2fs_nat_entry raw_nat;
			struct node_info ni;
			ni.nid = nid + i;

			if ((nid + i) == F2FS_NODE_INO(sbi) ||
					(nid + i) == F2FS_META_INO(sbi)) {
				ASSERT(nat_block->entries[i].block_addr != 0x0);
				continue;
			}

			if (lookup_nat_in_journal(sbi, nid + i,
							&raw_nat) >= 0) {
				node_info_from_raw_nat(&ni, &raw_nat);
				if (ni.blk_addr != 0x0) {
					f2fs_set_bit(nid + i,
							fsck->nat_area_bitmap);
					fsck->chk.valid_nat_entry_cnt++;
					DBG(3, "nid[0x%x] in nat cache\n",
								nid + i);
				}
			} else {
				node_info_from_raw_nat(&ni,
						&nat_block->entries[i]);
				if (ni.blk_addr == 0)
					continue;
				ASSERT(nid + i != 0x0);

				DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
					nid + i, ni.blk_addr, ni.ino);
				f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
				fsck->chk.valid_nat_entry_cnt++;
			}
		}
	}
	free(nat_block);

	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
			fsck->chk.valid_nat_entry_cnt,
			fsck->chk.valid_nat_entry_cnt);
}

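/*
 * Mount sequence: pick a valid superblock (primary, then backup), load
 * the newer of the two checkpoint packs, then build the segment and
 * node managers.  A minimal usage sketch (hypothetical caller, assuming
 * the target device has already been opened for dev_read/dev_write):
 *
 *	struct f2fs_sb_info sbi;
 *
 *	memset(&sbi, 0, sizeof(sbi));
 *	if (f2fs_do_mount(&sbi))
 *		return -1;
 *	... fsck or dump work on &sbi ...
 *	f2fs_do_umount(&sbi);
 */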
int f2fs_do_mount(struct f2fs_sb_info *sbi)
{
	int ret;

	sbi->active_logs = NR_CURSEG_TYPE;
	ret = validate_super_block(sbi, 0);
	if (ret) {
		ret = validate_super_block(sbi, 1);
		if (ret)
			return -1;
	}

	print_raw_sb_info(sbi);

	init_sb_info(sbi);

	ret = get_valid_checkpoint(sbi);
	if (ret) {
		ERR_MSG("Can't find valid checkpoint\n");
		return -1;
	}

	if (sanity_check_ckpt(sbi)) {
		ERR_MSG("Checkpoint is polluted\n");
		return -1;
	}

	print_ckpt_info(sbi);

	if (config.auto_fix) {
		u32 flag = le32_to_cpu(sbi->ckpt->ckpt_flags);

		if (flag & CP_FSCK_FLAG)
			config.fix_on = 1;
		else
			return 1;
	}

	config.bug_on = 0;

	sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
			le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
			le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	if (build_segment_manager(sbi)) {
		ERR_MSG("build_segment_manager failed\n");
		return -1;
	}

	if (build_node_manager(sbi)) {
		ERR_MSG("build_node_manager failed\n");
		return -1;
	}

	return 0;
}

void f2fs_do_umount(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;

	/* free nm_info */
	free(nm_i->nat_bitmap);
	free(sbi->nm_info);

	/* free sit_info */
	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
		free(sit_i->sentries[i].cur_valid_map);
		free(sit_i->sentries[i].ckpt_valid_map);
	}
	free(sit_i->sit_bitmap);
	free(sm_i->sit_info);

	/* free sm_info */
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		free(sm_i->curseg_array[i].sum_blk);

	free(sm_i->curseg_array);
	free(sbi->sm_info);

	free(sbi->ckpt);
	free(sbi->raw_super);
}
