mount.c revision 4c992fe23ae6739767f584a96157d0585282d8e2
/**
 * mount.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

void print_inode_info(struct f2fs_inode *inode)
{
	unsigned int i = 0;
	int namelen = le32_to_cpu(inode->i_namelen);

	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_pino);

	if (namelen) {
		DISP_u32(inode, i_namelen);
		inode->i_name[namelen] = '\0';
		DISP_utf(inode, i_name);
	}

	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	DISP_u32(inode, i_addr[0]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[1]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[2]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[3]);	/* Pointers to data blocks */

	for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
		if (inode->i_addr[i] != 0x0) {
			printf("i_addr[0x%x] points data block\r\t\t[0x%4x]\n",
					i, inode->i_addr[i]);
			break;
		}
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	printf("\n");
}

void print_node_info(struct f2fs_node *node_block)
{
	nid_t ino = le32_to_cpu(node_block->footer.ino);
	nid_t nid = le32_to_cpu(node_block->footer.nid);

	/* Is this an inode? */
	if (ino == nid) {
		DBG(0, "Node ID [0x%x:%u] is inode\n", nid, nid);
		print_inode_info(&node_block->i);
	} else {
		int i;
		u32 *dump_blk = (u32 *)node_block;
		DBG(0, "Node ID [0x%x:%u] is direct node or indirect node.\n",
								nid, nid);
		for (i = 0; i <= 10; i++)
			MSG(0, "[%d]\t\t\t[0x%8x : %d]\n",
						i, dump_blk[i], dump_blk[i]);
	}
}

void print_raw_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);

	if (!config.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);
	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	printf("\n");
}

void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (!config.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);

	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}

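/*
 * Basic superblock sanity checks: the on-disk magic number, block size and
 * sector geometry must match the values this tool was built with.
 * Returns 0 if the raw superblock looks usable, -1 otherwise.
 */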
int sanity_check_raw_super(struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		return -1;
	}

	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		return -1;
	}

	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (F2FS_BLKSIZE != blocksize) {
		return -1;
	}

	if (F2FS_LOG_SECTOR_SIZE != le32_to_cpu(raw_super->log_sectorsize)) {
		return -1;
	}

	if (F2FS_LOG_SECTORS_PER_BLOCK !=
				le32_to_cpu(raw_super->log_sectors_per_block)) {
		return -1;
	}

	return 0;
}

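/*
 * Read one of the two superblock copies (block 0 is the primary copy, any
 * other value selects the backup in the following block) and run the sanity
 * checks on it.
 */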
int validate_super_block(struct f2fs_sb_info *sbi, int block)
{
	u64 offset;

	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
	if (!sbi->raw_super)
		return -ENOMEM;

	if (block == 0)
		offset = F2FS_SUPER_OFFSET;
	else
		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;

	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block))) {
		free(sbi->raw_super);
		return -1;
	}

	if (!sanity_check_raw_super(sbi->raw_super))
		return 0;

	free(sbi->raw_super);
	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);

	return -EINVAL;
}

int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
		* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;
	return 0;
}

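/*
 * Validate one checkpoint pack: the first and the last block of the pack
 * both hold an f2fs_checkpoint header, and both must carry a valid CRC and
 * the same version number. On success the first block is returned (the
 * caller frees it) and *version is set; otherwise NULL is returned.
 */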
void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
				unsigned long long *version)
{
	void *cp_page_1, *cp_page_2;
	struct f2fs_checkpoint *cp_block;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cur_version = 0, pre_version = 0;
	unsigned int crc = 0;
	size_t crc_offset;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = malloc(PAGE_SIZE);
	if (dev_read_block(cp_page_1, cp_addr) < 0)
		goto invalid_cp1;

	cp_block = (struct f2fs_checkpoint *)cp_page_1;
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
	if (f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = le64_to_cpu(cp_block->checkpoint_ver);

	/* Read the 2nd cp block in this CP pack */
	cp_page_2 = malloc(PAGE_SIZE);
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;

	if (dev_read_block(cp_page_2, cp_addr) < 0)
		goto invalid_cp2;

	cp_block = (struct f2fs_checkpoint *)cp_page_2;
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
	if (f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = le64_to_cpu(cp_block->checkpoint_ver);

	if (cur_version == pre_version) {
		*version = cur_version;
		free(cp_page_2);
		return cp_page_1;
	}

invalid_cp2:
	free(cp_page_2);
invalid_cp1:
	free(cp_page_1);
	return NULL;
}

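/*
 * Read both checkpoint packs and keep the newer valid one in sbi->ckpt,
 * including any extra payload blocks indicated by cp_payload.
 */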
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_sb = sbi->raw_super;
	void *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	int ret;

	sbi->ckpt = malloc(cp_blks * blk_size);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2).
	 */
	cp_start_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += 1 << le32_to_cpu(raw_sb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version)) {
			cur_page = cp2;
			sbi->cur_cp = 2;
		} else {
			cur_page = cp1;
			sbi->cur_cp = 1;
		}
	} else if (cp1) {
		cur_page = cp1;
		sbi->cur_cp = 1;
	} else if (cp2) {
		cur_page = cp2;
		sbi->cur_cp = 2;
	} else {
		free(cp1);
		free(cp2);
		goto fail_no_cp;
	}

	memcpy(sbi->ckpt, cur_page, blk_size);

	if (cp_blks > 1) {
		unsigned int i;
		unsigned long long cp_blk_no;

		cp_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
		if (cur_page == cp2)
			cp_blk_no += 1 <<
				le32_to_cpu(raw_sb->log_blocks_per_seg);
		/* copy the extra checkpoint payload blocks */
		for (i = 1; i < cp_blks; i++) {
			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
			ret = dev_read_block(cur_page, cp_blk_no + i);
			ASSERT(ret >= 0);
			memcpy(ckpt + i * blk_size, cur_page, blk_size);
		}
	}
	free(cp1);
	free(cp2);
	return 0;

fail_no_cp:
	free(sbi->ckpt);
	return -EINVAL;
}

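/*
 * A trustworthy checkpoint must leave room for user data: the metadata
 * segments (checkpoint, SIT, NAT, SSA plus the reserved segments recorded
 * in the checkpoint) have to be fewer than the total segment count.
 */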
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	return 0;
}

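/*
 * Set up the node manager: derive max_nid from the number of NAT blocks
 * and take a private copy of the NAT version bitmap kept in the checkpoint.
 */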
int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat counts both segments of each pair, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	return 0;
}

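/*
 * Allocate the in-memory SIT: one seg_entry (with current and checkpointed
 * validity maps) per main-area segment, plus a private copy of the SIT
 * version bitmap taken from the checkpoint.
 */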
int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	sit_i = malloc(sizeof(struct sit_info));
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
		sit_i->sentries[start].ckpt_valid_map
			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	sit_segs = le32_to_cpu(raw_sb->segment_count_sit) >> 1;
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = malloc(bitmap_size);
	memcpy(dst_bitmap, src_bitmap, bitmap_size);

	sit_i->sit_base_addr = le32_to_cpu(raw_sb->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(ckpt->elapsed_time);
	return 0;
}

void reset_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	struct seg_entry *se;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	se = get_seg_entry(sbi, curseg->segno);
	se->type = type;
}

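/*
 * Compacted summaries pack the NAT journal, the SIT journal and the summary
 * entries of the three current data segments into consecutive blocks
 * starting at start_sum_block(), moving to the next block whenever an entry
 * would run into the footer area.
 */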
static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int i, j, offset;
	block_t start;
	char *kaddr;
	int ret;

	start = start_sum_block(sbi);

	kaddr = (char *)malloc(PAGE_SIZE);
	ret = dev_read_block(kaddr, start++);
	ASSERT(ret >= 0);

	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&curseg->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&curseg->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);

	offset = 2 * SUM_JOURNAL_SIZE;
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		reset_curseg(sbi, i);

		if (curseg->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;
		else
			blk_off = curseg->next_blkoff;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			curseg->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <=
					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
				continue;
			memset(kaddr, 0, PAGE_SIZE);
			ret = dev_read_block(kaddr, start++);
			ASSERT(ret >= 0);
			offset = 0;
		}
	}
	free(kaddr);
}

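/*
 * Without a clean-umount checkpoint the node summaries are not on disk, so
 * rebuild them by reading every block of the node segment and taking the
 * nid from each block's footer.
 */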
static void restore_node_summary(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_summary_block *sum_blk)
{
	struct f2fs_node *node_blk;
	struct f2fs_summary *sum_entry;
	block_t addr;
	unsigned int i;
	int ret;

	node_blk = malloc(F2FS_BLKSIZE);
	ASSERT(node_blk);

	/* scan the node segment */
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum_blk->entries[0];

	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
		ret = dev_read_block(node_blk, addr);
		ASSERT(ret >= 0);
		sum_entry->nid = node_blk->footer.nid;
		addr++;
	}
	free(node_blk);
}

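/*
 * Load the summary block of one current segment. Data segments (and node
 * segments after a clean umount) keep it inside the checkpoint pack;
 * otherwise it is read from the SSA area and, for node segments, rebuilt by
 * scanning the segment itself.
 */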
static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum_blk;
	struct curseg_info *curseg;
	unsigned int segno = 0;
	block_t blk_addr = 0;
	int ret;

	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLKADDR(sbi, segno);
	}

	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
	ret = dev_read_block(sum_blk, blk_addr);
	ASSERT(ret >= 0);

	if (IS_NODESEG(type) && !is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
		restore_node_summary(sbi, segno, sum_blk);

	curseg = CURSEG_I(sbi, type);
	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
	reset_curseg(sbi, type);
	free(sum_blk);
}

static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		read_compacted_summaries(sbi);
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		read_normal_summaries(sbi, type);
}

static void build_curseg(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *array;
	unsigned short blk_off;
	unsigned int segno;
	int i;

	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
	ASSERT(array);

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
		ASSERT(array[i].sum_blk);
		if (i <= CURSEG_COLD_DATA) {
			blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
			segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		} else {
			blk_off = le16_to_cpu(ckpt->cur_node_blkoff[i -
							CURSEG_HOT_NODE]);
			segno = le32_to_cpu(ckpt->cur_node_segno[i -
							CURSEG_HOT_NODE]);
		}
		array[i].segno = segno;
		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
		array[i].next_segno = NULL_SEGNO;
		array[i].next_blkoff = blk_off;
		array[i].alloc_type = ckpt->alloc_type[i];
	}
	restore_curseg_summaries(sbi);
}

inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;

	ASSERT(segno <= end_segno);
}

static struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;
	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
	int ret;

	check_seg_range(sbi, segno);

	/* the SIT version bitmap selects which copy of the block is live */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	ret = dev_read_block(sit_blk, blk_addr);
	ASSERT(ret >= 0);

	return sit_blk;
}

void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_sit_block *sit_blk)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;
	int ret;

	/* the SIT version bitmap selects which copy of the block is live */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	ret = dev_write_block(sit_blk, blk_addr);
	ASSERT(ret >= 0);
}

void check_block_count(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	int valid_blocks = 0;
	unsigned int i;

	/* check segment usage */
	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
				segno, GET_SIT_VBLOCKS(raw_sit));

	/* check boundary of a given segment number */
	if (segno > end_segno)
		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);

	/* check bitmap with valid block count */
	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);

	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);

	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
				segno, GET_SIT_TYPE(raw_sit));
}

void seg_info_from_raw_sit(struct seg_entry *se,
		struct f2fs_sit_entry *raw_sit)
{
	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(raw_sit);
	se->mtime = le64_to_cpu(raw_sit->mtime);
}

struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

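/*
 * Fetch the summary block that covers @segno. If the segment is one of the
 * six open (current) segments, the in-memory summary from the checkpoint is
 * used and its node/data type is cross-checked; otherwise the block is read
 * from the SSA area. The return value reports which case was taken.
 */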
int get_sum_block(struct f2fs_sb_info *sbi, unsigned int segno,
				struct f2fs_summary_block *sum_blk)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *curseg;
	int type, ret;
	u64 ssa_blk;

	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
		if (segno == le32_to_cpu(ckpt->cur_node_segno[type])) {
			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a data "
						"segment, but should be node",
						segno);
				return -EINVAL;
			}
			memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
			return SEG_TYPE_CUR_NODE;
		}
	}

	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
		if (segno == le32_to_cpu(ckpt->cur_data_segno[type])) {
			curseg = CURSEG_I(sbi, type);
			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a node "
						"segment, but should be data",
						segno);
				return -EINVAL;
			}
			DBG(2, "segno [0x%x] is current data seg[0x%x]\n",
								segno, type);
			memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
			return SEG_TYPE_CUR_DATA;
		}
	}

	ret = dev_read_block(sum_blk, ssa_blk);
	ASSERT(ret >= 0);

	if (IS_SUM_NODE_SEG(sum_blk->footer))
		return SEG_TYPE_NODE;
	else
		return SEG_TYPE_DATA;
}

int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
				struct f2fs_summary *sum_entry)
{
	struct f2fs_summary_block *sum_blk;
	u32 segno, offset;
	int ret;

	segno = GET_SEGNO(sbi, blk_addr);
	offset = OFFSET_IN_SEG(sbi, blk_addr);

	sum_blk = calloc(BLOCK_SZ, 1);

	ret = get_sum_block(sbi, segno, sum_blk);
	memcpy(sum_entry, &(sum_blk->entries[offset]),
				sizeof(struct f2fs_summary));
	free(sum_blk);
	return ret;
}

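/*
 * Fetch the raw NAT entry for @nid: the NAT journal in the hot data summary
 * is consulted first, then the on-disk NAT block. NAT blocks are stored in
 * paired segments, and the NAT version bitmap tells which copy is current.
 */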
static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
				struct f2fs_nat_entry *raw_nat)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off, entry_off;
	int ret;

	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
		return;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

	block_off = nid / NAT_ENTRY_PER_BLOCK;
	entry_off = nid % NAT_ENTRY_PER_BLOCK;

	seg_off = block_off >> sbi->log_blocks_per_seg;
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	ret = dev_read_block(nat_block, block_addr);
	ASSERT(ret >= 0);

	memcpy(raw_nat, &nat_block->entries[entry_off],
					sizeof(struct f2fs_nat_entry));
	free(nat_block);
}

void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nat_entry raw_nat;

	get_nat_entry(sbi, nid, &raw_nat);
	ni->nid = nid;
	node_info_from_raw_nat(ni, &raw_nat);
}

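/*
 * Fill the in-memory seg_entry array from the on-disk SIT, preferring
 * entries that are still pending in the SIT journal of the cold data
 * summary block.
 */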
void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int segno;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct seg_entry *se = &sit_i->sentries[segno];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		int i;

		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == segno) {
				sit = sit_in_journal(sum, i);
				goto got_it;
			}
		}
		sit_blk = get_current_sit_page(sbi, segno);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
		free(sit_blk);
got_it:
		check_block_count(sbi, segno, &sit);
		seg_info_from_raw_sit(se, &sit);
	}
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;

	sm_info = malloc(sizeof(struct f2fs_sm_info));
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);

	build_sit_info(sbi);

	build_curseg(sbi);

	build_sit_entries(sbi);

	return 0;
}

void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	unsigned int segno = 0;
	char *ptr = NULL;
	u32 sum_vblocks = 0;
	u32 free_segs = 0;
	struct seg_entry *se;

	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
	ptr = fsck->sit_area_bitmap;

	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		se = get_seg_entry(sbi, segno);

		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
		ptr += SIT_VBLOCK_MAP_SIZE;

		if (se->valid_blocks == 0x0) {
			if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
				le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
				le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
				le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
				le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
				le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
				continue;
			} else {
				free_segs++;
			}
		} else {
			sum_vblocks += se->valid_blocks;
		}
	}
	fsck->chk.sit_valid_blocks = sum_vblocks;
	fsck->chk.sit_free_segs = free_segs;

	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
			sum_vblocks, sum_vblocks,
			free_segs, free_segs);
}

void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno = 0;
	struct f2fs_summary_block *sum = curseg->sum_blk;
	char *ptr = NULL;

	/* remove sit journal */
	sum->n_sits = 0;

	fsck->chk.free_segs = 0;

	ptr = fsck->main_area_bitmap;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry *sit;
		struct seg_entry *se;
		u16 valid_blocks = 0;
		u16 type;
		int i;

		sit_blk = get_current_sit_page(sbi, segno);
		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);

		/* update valid block count */
		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
			valid_blocks += get_bits_in_byte(sit->valid_map[i]);

		se = get_seg_entry(sbi, segno);
		type = se->type;
		if (type >= NO_CHECK_TYPE) {
			ASSERT(valid_blocks);
			type = 0;
		}
		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
								valid_blocks);
		rewrite_current_sit_page(sbi, segno, sit_blk);
		free(sit_blk);

		if (valid_blocks == 0 &&
			le32_to_cpu(sbi->ckpt->cur_node_segno[0]) != segno &&
			le32_to_cpu(sbi->ckpt->cur_data_segno[0]) != segno &&
			le32_to_cpu(sbi->ckpt->cur_node_segno[1]) != segno &&
			le32_to_cpu(sbi->ckpt->cur_data_segno[1]) != segno &&
			le32_to_cpu(sbi->ckpt->cur_node_segno[2]) != segno &&
			le32_to_cpu(sbi->ckpt->cur_data_segno[2]) != segno)
			fsck->chk.free_segs++;

		ptr += SIT_VBLOCK_MAP_SIZE;
	}
}

int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
					struct f2fs_nat_entry *raw_nat)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;

	for (i = 0; i < nats_in_cursum(sum); i++) {
		if (le32_to_cpu(nid_in_journal(sum, i)) == nid) {
			memcpy(raw_nat, &nat_in_journal(sum, i),
						sizeof(struct f2fs_nat_entry));
			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
			return i;
		}
	}
	return -1;
}

void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off, entry_off;
	int ret;
	int i = 0;

	/* check in journal */
	for (i = 0; i < nats_in_cursum(sum); i++) {
		if (le32_to_cpu(nid_in_journal(sum, i)) == nid) {
			memset(&nat_in_journal(sum, i), 0,
					sizeof(struct f2fs_nat_entry));
			FIX_MSG("Remove nid [0x%x] in nat journal\n", nid);
			return;
		}
	}
	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

	block_off = nid / NAT_ENTRY_PER_BLOCK;
	entry_off = nid % NAT_ENTRY_PER_BLOCK;

	seg_off = block_off >> sbi->log_blocks_per_seg;
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	ret = dev_read_block(nat_block, block_addr);
	ASSERT(ret >= 0);

	memset(&nat_block->entries[entry_off], 0,
					sizeof(struct f2fs_nat_entry));

	ret = dev_write_block(nat_block, block_addr);
	ASSERT(ret >= 0);
	free(nat_block);
}

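/*
 * Walk the whole NAT area (journal entries included) and mark every nid
 * that has a non-zero block address in fsck->nat_area_bitmap; the number of
 * valid NAT entries is kept in fsck->chk.valid_nat_entry_cnt for later checks.
 */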
void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	u32 nid, nr_nat_blks;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;
	int ret;
	unsigned int i;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
	ASSERT(nat_block);

	/* Alloc & build nat entry bitmap */
	nr_nat_blks = (le32_to_cpu(raw_sb->segment_count_nat) / 2) <<
						sbi->log_blocks_per_seg;

	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
	ASSERT(fsck->nat_area_bitmap != NULL);

	for (block_off = 0; block_off < nr_nat_blks; block_off++) {
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		nid = block_off * NAT_ENTRY_PER_BLOCK;
		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
			struct f2fs_nat_entry raw_nat;
			struct node_info ni;
			ni.nid = nid + i;

			if ((nid + i) == F2FS_NODE_INO(sbi) ||
					(nid + i) == F2FS_META_INO(sbi)) {
				ASSERT(nat_block->entries[i].block_addr != 0x0);
				continue;
			}

			if (lookup_nat_in_journal(sbi, nid + i,
							&raw_nat) >= 0) {
				node_info_from_raw_nat(&ni, &raw_nat);
				if (ni.blk_addr != 0x0) {
					f2fs_set_bit(nid + i,
							fsck->nat_area_bitmap);
					fsck->chk.valid_nat_entry_cnt++;
					DBG(3, "nid[0x%x] in nat cache\n",
								nid + i);
				}
			} else {
				node_info_from_raw_nat(&ni,
						&nat_block->entries[i]);
				if (ni.blk_addr == 0)
					continue;
				ASSERT(nid + i != 0x0);

				DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
					nid + i, ni.blk_addr, ni.ino);
				f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
				fsck->chk.valid_nat_entry_cnt++;
			}
		}
	}
	free(nat_block);

	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
			fsck->chk.valid_nat_entry_cnt,
			fsck->chk.valid_nat_entry_cnt);
}

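/*
 * Bring the image up for fsck: pick a valid superblock and checkpoint,
 * copy the relevant counters into sbi, then build the segment and node
 * managers.
 */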
int f2fs_do_mount(struct f2fs_sb_info *sbi)
{
	int ret;

	sbi->active_logs = NR_CURSEG_TYPE;
	ret = validate_super_block(sbi, 0);
	if (ret) {
		ret = validate_super_block(sbi, 1);
		if (ret)
			return -1;
	}

	print_raw_sb_info(sbi);

	init_sb_info(sbi);

	ret = get_valid_checkpoint(sbi);
	if (ret) {
		ERR_MSG("Can't find valid checkpoint\n");
		return -1;
	}

	if (sanity_check_ckpt(sbi)) {
		ERR_MSG("Checkpoint is polluted\n");
		return -1;
	}

	print_ckpt_info(sbi);

	if (config.auto_fix) {
		u32 flag = le32_to_cpu(sbi->ckpt->ckpt_flags);

		if (flag & CP_FSCK_FLAG)
			config.fix_on = 1;
		else
			return 1;
	}

	config.bug_on = 0;

	sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
			le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
			le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	if (build_segment_manager(sbi)) {
		ERR_MSG("build_segment_manager failed\n");
		return -1;
	}

	if (build_node_manager(sbi)) {
		ERR_MSG("build_node_manager failed\n");
		return -1;
	}

	return 0;
}

void f2fs_do_umount(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;

	/* free nm_info */
	free(nm_i->nat_bitmap);
	free(sbi->nm_info);

	/* free sit_info */
	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
		free(sit_i->sentries[i].cur_valid_map);
		free(sit_i->sentries[i].ckpt_valid_map);
	}
	free(sit_i->sit_bitmap);
	free(sm_i->sit_info);

	/* free sm_info */
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		free(sm_i->curseg_array[i].sum_blk);

	free(sm_i->curseg_array);
	free(sbi->sm_info);

	free(sbi->ckpt);
	free(sbi->raw_super);
}