mount.c revision 4b1ecd77a182c731c29cd31986508fdbe53829ed
1/**
2 * mount.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 *             http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include "fsck.h"
12#include <locale.h>
13
14static u32 get_free_segments(struct f2fs_sb_info *sbi)
15{
16	u32 i, free_segs = 0;
17
18	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
19		struct seg_entry *se = get_seg_entry(sbi, i);
20
21		if (se->valid_blocks == 0x0 &&
22				!IS_CUR_SEGNO(sbi, i, NO_CHECK_TYPE))
23			free_segs++;
24	}
25	return free_segs;
26}
27
28void update_free_segments(struct f2fs_sb_info *sbi)
29{
30	char *progress = "-*|*-";
31	static int i = 0;
32
33	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
34	fflush(stdout);
35	i++;
36}
37
38void print_inode_info(struct f2fs_inode *inode, int name)
39{
40	unsigned char en[F2FS_NAME_LEN + 1];
41	unsigned int i = 0;
42	int namelen = le32_to_cpu(inode->i_namelen);
43	int is_encrypt = file_is_encrypt(inode);
44
45	namelen = convert_encrypted_name(inode->i_name, namelen, en, is_encrypt);
46	en[namelen] = '\0';
47	if (name && namelen) {
48		inode->i_name[namelen] = '\0';
49		MSG(0, " - File name         : %s%s\n", en,
50				is_encrypt ? " <encrypted>" : "");
51		setlocale(LC_ALL, "");
52		MSG(0, " - File size         : %'llu (bytes)\n",
53				le64_to_cpu(inode->i_size));
54		return;
55	}
56
57	DISP_u32(inode, i_mode);
58	DISP_u32(inode, i_advise);
59	DISP_u32(inode, i_uid);
60	DISP_u32(inode, i_gid);
61	DISP_u32(inode, i_links);
62	DISP_u64(inode, i_size);
63	DISP_u64(inode, i_blocks);
64
65	DISP_u64(inode, i_atime);
66	DISP_u32(inode, i_atime_nsec);
67	DISP_u64(inode, i_ctime);
68	DISP_u32(inode, i_ctime_nsec);
69	DISP_u64(inode, i_mtime);
70	DISP_u32(inode, i_mtime_nsec);
71
72	DISP_u32(inode, i_generation);
73	DISP_u32(inode, i_current_depth);
74	DISP_u32(inode, i_xattr_nid);
75	DISP_u32(inode, i_flags);
76	DISP_u32(inode, i_inline);
77	DISP_u32(inode, i_pino);
78	DISP_u32(inode, i_dir_level);
79
80	if (namelen) {
81		DISP_u32(inode, i_namelen);
82		printf("%-30s\t\t[%s]\n", "i_name", en);
83	}
84
85	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
86			le32_to_cpu(inode->i_ext.fofs),
87			le32_to_cpu(inode->i_ext.blk_addr),
88			le32_to_cpu(inode->i_ext.len));
89
90	DISP_u32(inode, i_addr[0]);	/* Pointers to data blocks */
91	DISP_u32(inode, i_addr[1]);	/* Pointers to data blocks */
92	DISP_u32(inode, i_addr[2]);	/* Pointers to data blocks */
93	DISP_u32(inode, i_addr[3]);	/* Pointers to data blocks */
94
95	for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
96		if (inode->i_addr[i] != 0x0) {
97			printf("i_addr[0x%x] points to data block\r\t\t[0x%4x]\n",
98					i, le32_to_cpu(inode->i_addr[i]));
99			break;
100		}
101	}
102
103	DISP_u32(inode, i_nid[0]);	/* direct */
104	DISP_u32(inode, i_nid[1]);	/* direct */
105	DISP_u32(inode, i_nid[2]);	/* indirect */
106	DISP_u32(inode, i_nid[3]);	/* indirect */
107	DISP_u32(inode, i_nid[4]);	/* double indirect */
108
109	printf("\n");
110}
111
112void print_node_info(struct f2fs_node *node_block, int verbose)
113{
114	nid_t ino = le32_to_cpu(node_block->footer.ino);
115	nid_t nid = le32_to_cpu(node_block->footer.nid);
116	/* Is this inode? */
117	if (ino == nid) {
118		DBG(verbose, "Node ID [0x%x:%u] is an inode\n", nid, nid);
119		print_inode_info(&node_block->i, verbose);
120	} else {
121		int i;
122		u32 *dump_blk = (u32 *)node_block;
123		DBG(verbose,
124			"Node ID [0x%x:%u] is a direct or indirect node.\n",
125								nid, nid);
126		for (i = 0; i <= 10; i++)
127			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
128						i, dump_blk[i], dump_blk[i]);
129	}
130}
131
132static void DISP_label(u_int16_t *name)
133{
134	char buffer[MAX_VOLUME_NAME];
135
136	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
137	printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
138}
139
140void print_raw_sb_info(struct f2fs_super_block *sb)
141{
142	if (!c.dbg_lv)
143		return;
144
145	printf("\n");
146	printf("+--------------------------------------------------------+\n");
147	printf("| Super block                                            |\n");
148	printf("+--------------------------------------------------------+\n");
149
150	DISP_u32(sb, magic);
151	DISP_u32(sb, major_ver);
152
153	DISP_label(sb->volume_name);
154
155	DISP_u32(sb, minor_ver);
156	DISP_u32(sb, log_sectorsize);
157	DISP_u32(sb, log_sectors_per_block);
158
159	DISP_u32(sb, log_blocksize);
160	DISP_u32(sb, log_blocks_per_seg);
161	DISP_u32(sb, segs_per_sec);
162	DISP_u32(sb, secs_per_zone);
163	DISP_u32(sb, checksum_offset);
164	DISP_u64(sb, block_count);
165
166	DISP_u32(sb, section_count);
167	DISP_u32(sb, segment_count);
168	DISP_u32(sb, segment_count_ckpt);
169	DISP_u32(sb, segment_count_sit);
170	DISP_u32(sb, segment_count_nat);
171
172	DISP_u32(sb, segment_count_ssa);
173	DISP_u32(sb, segment_count_main);
174	DISP_u32(sb, segment0_blkaddr);
175
176	DISP_u32(sb, cp_blkaddr);
177	DISP_u32(sb, sit_blkaddr);
178	DISP_u32(sb, nat_blkaddr);
179	DISP_u32(sb, ssa_blkaddr);
180	DISP_u32(sb, main_blkaddr);
181
182	DISP_u32(sb, root_ino);
183	DISP_u32(sb, node_ino);
184	DISP_u32(sb, meta_ino);
185	DISP_u32(sb, cp_payload);
186	DISP("%s", sb, version);
187	printf("\n");
188}
189
190void print_ckpt_info(struct f2fs_sb_info *sbi)
191{
192	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
193
194	if (!c.dbg_lv)
195		return;
196
197	printf("\n");
198	printf("+--------------------------------------------------------+\n");
199	printf("| Checkpoint                                             |\n");
200	printf("+--------------------------------------------------------+\n");
201
202	DISP_u64(cp, checkpoint_ver);
203	DISP_u64(cp, user_block_count);
204	DISP_u64(cp, valid_block_count);
205	DISP_u32(cp, rsvd_segment_count);
206	DISP_u32(cp, overprov_segment_count);
207	DISP_u32(cp, free_segment_count);
208
209	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
210	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
211	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
212	DISP_u32(cp, cur_node_segno[0]);
213	DISP_u32(cp, cur_node_segno[1]);
214	DISP_u32(cp, cur_node_segno[2]);
215
216	DISP_u32(cp, cur_node_blkoff[0]);
217	DISP_u32(cp, cur_node_blkoff[1]);
218	DISP_u32(cp, cur_node_blkoff[2]);
219
220
221	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
222	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
223	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
224	DISP_u32(cp, cur_data_segno[0]);
225	DISP_u32(cp, cur_data_segno[1]);
226	DISP_u32(cp, cur_data_segno[2]);
227
228	DISP_u32(cp, cur_data_blkoff[0]);
229	DISP_u32(cp, cur_data_blkoff[1]);
230	DISP_u32(cp, cur_data_blkoff[2]);
231
232	DISP_u32(cp, ckpt_flags);
233	DISP_u32(cp, cp_pack_total_block_count);
234	DISP_u32(cp, cp_pack_start_sum);
235	DISP_u32(cp, valid_node_count);
236	DISP_u32(cp, valid_inode_count);
237	DISP_u32(cp, next_free_nid);
238	DISP_u32(cp, sit_ver_bitmap_bytesize);
239	DISP_u32(cp, nat_ver_bitmap_bytesize);
240	DISP_u32(cp, checksum_offset);
241	DISP_u64(cp, elapsed_time);
242
243	DISP_u32(cp, sit_nat_version_bitmap[0]);
244	printf("\n\n");
245}
246
247void print_cp_state(u32 flag)
248{
249	MSG(0, "Info: checkpoint state = %x : ", flag);
250	if (flag & CP_FSCK_FLAG)
251		MSG(0, "%s", " fsck");
252	if (flag & CP_ERROR_FLAG)
253		MSG(0, "%s", " error");
254	if (flag & CP_COMPACT_SUM_FLAG)
255		MSG(0, "%s", " compacted_summary");
256	if (flag & CP_ORPHAN_PRESENT_FLAG)
257		MSG(0, "%s", " orphan_inodes");
258	if (flag & CP_FASTBOOT_FLAG)
259		MSG(0, "%s", " fastboot");
260	if (flag & CP_UMOUNT_FLAG)
261		MSG(0, "%s", " unmount");
262	else
263		MSG(0, "%s", " sudden-power-off");
264	MSG(0, "\n");
265}
266
267void print_sb_state(struct f2fs_super_block *sb)
268{
269	__le32 f = sb->feature;
270	int i;
271
272	MSG(0, "Info: superblock features = %x : ", f);
273	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
274		MSG(0, "%s", " encrypt");
275	}
276	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
277		MSG(0, "%s", " zoned block device");
278	}
279	MSG(0, "\n");
280	MSG(0, "Info: superblock encrypt level = %d, salt = ",
281					sb->encryption_level);
282	for (i = 0; i < 16; i++)
283		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
284	MSG(0, "\n");
285}
286
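/*
 * Check that the CP, SIT, NAT, SSA and MAIN areas recorded in the
 * superblock are laid out back to back with no gaps or overlaps.
 * If the main area ends before the last segment, trim segment_count
 * and write the fixed superblock back to 'offset'.
 */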
287static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
288							u64 offset)
289{
290	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
291	u32 cp_blkaddr = get_sb(cp_blkaddr);
292	u32 sit_blkaddr = get_sb(sit_blkaddr);
293	u32 nat_blkaddr = get_sb(nat_blkaddr);
294	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
295	u32 main_blkaddr = get_sb(main_blkaddr);
296	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
297	u32 segment_count_sit = get_sb(segment_count_sit);
298	u32 segment_count_nat = get_sb(segment_count_nat);
299	u32 segment_count_ssa = get_sb(segment_count_ssa);
300	u32 segment_count_main = get_sb(segment_count_main);
301	u32 segment_count = get_sb(segment_count);
302	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
303	u64 main_end_blkaddr = main_blkaddr +
304				(segment_count_main << log_blocks_per_seg);
305	u64 seg_end_blkaddr = segment0_blkaddr +
306				(segment_count << log_blocks_per_seg);
307
308	if (segment0_blkaddr != cp_blkaddr) {
309		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
310				segment0_blkaddr, cp_blkaddr);
311		return -1;
312	}
313
314	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
315							sit_blkaddr) {
316		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
317			cp_blkaddr, sit_blkaddr,
318			segment_count_ckpt << log_blocks_per_seg);
319		return -1;
320	}
321
322	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
323							nat_blkaddr) {
324		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
325			sit_blkaddr, nat_blkaddr,
326			segment_count_sit << log_blocks_per_seg);
327		return -1;
328	}
329
330	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
331							ssa_blkaddr) {
332		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
333			nat_blkaddr, ssa_blkaddr,
334			segment_count_nat << log_blocks_per_seg);
335		return -1;
336	}
337
338	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
339							main_blkaddr) {
340		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
341			ssa_blkaddr, main_blkaddr,
342			segment_count_ssa << log_blocks_per_seg);
343		return -1;
344	}
345
346	if (main_end_blkaddr > seg_end_blkaddr) {
347		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
348			main_blkaddr,
349			segment0_blkaddr +
350				(segment_count << log_blocks_per_seg),
351			segment_count_main << log_blocks_per_seg);
352		return -1;
353	} else if (main_end_blkaddr < seg_end_blkaddr) {
354		int err;
355
356		set_sb(segment_count, (main_end_blkaddr -
357				segment0_blkaddr) >> log_blocks_per_seg);
358
359		err = dev_write(sb, offset, sizeof(struct f2fs_super_block));
360		MSG(0, "Info: Fix alignment: %s, start(%u) end(%u) block(%u)\n",
361			err ? "failed": "done",
362			main_blkaddr,
363			segment0_blkaddr +
364				(segment_count << log_blocks_per_seg),
365			segment_count_main << log_blocks_per_seg);
366	}
367	return 0;
368}
369
370int sanity_check_raw_super(struct f2fs_super_block *sb, u64 offset)
371{
372	unsigned int blocksize;
373
374	if (F2FS_SUPER_MAGIC != get_sb(magic))
375		return -1;
376
377	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE)
378		return -1;
379
380	blocksize = 1 << get_sb(log_blocksize);
381	if (F2FS_BLKSIZE != blocksize)
382		return -1;
383
384	/* check log blocks per segment */
385	if (get_sb(log_blocks_per_seg) != 9)
386		return -1;
387
388	/* Currently, only 512/1024/2048/4096-byte sector sizes are supported */
389	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
390			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE)
391		return -1;
392
393	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
394						F2FS_MAX_LOG_SECTOR_SIZE)
395		return -1;
396
397	/* check reserved ino info */
398	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
399					get_sb(root_ino) != 3)
400		return -1;
401
402	/* Check zoned block device feature */
403	if (c.zoned_model == F2FS_ZONED_HM &&
404			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
405		MSG(0, "\tMissing zoned block device feature\n");
406		return -1;
407	}
408
409	if (sanity_check_area_boundary(sb, offset))
410		return -1;
411	return 0;
412}
413
414int validate_super_block(struct f2fs_sb_info *sbi, int block)
415{
416	u64 offset;
417
418	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
419
420	if (block == 0)
421		offset = F2FS_SUPER_OFFSET;
422	else
423		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;
424
425	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
426		return -1;
427
428	if (!sanity_check_raw_super(sbi->raw_super, offset)) {
429		/* get kernel version */
430		if (c.kd >= 0) {
431			dev_read_version(c.version, 0, VERSION_LEN);
432			get_kernel_version(c.version);
433		} else {
434			memset(c.version, 0, VERSION_LEN);
435		}
436
437		/* build sb version */
438		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
439		get_kernel_version(c.sb_version);
440		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
441		get_kernel_version(c.init_version);
442
443		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
444		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
445					c.sb_version, c.version);
446		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
447			int ret;
448
449			memcpy(sbi->raw_super->version,
450						c.version, VERSION_LEN);
451			ret = dev_write(sbi->raw_super, offset,
452					sizeof(struct f2fs_super_block));
453			ASSERT(ret >= 0);
454
455			c.auto_fix = 0;
456			c.fix_on = 1;
457		}
458		print_sb_state(sbi->raw_super);
459		return 0;
460	}
461
462	free(sbi->raw_super);
463	sbi->raw_super = NULL;
464	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);
465
466	return -EINVAL;
467}
468
469int init_sb_info(struct f2fs_sb_info *sbi)
470{
471	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
472	u64 total_sectors;
473
474	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
475	sbi->log_blocksize = get_sb(log_blocksize);
476	sbi->blocksize = 1 << sbi->log_blocksize;
477	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
478	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
479	sbi->segs_per_sec = get_sb(segs_per_sec);
480	sbi->secs_per_zone = get_sb(secs_per_zone);
481	sbi->total_sections = get_sb(section_count);
482	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
483				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
484	sbi->root_ino_num = get_sb(root_ino);
485	sbi->node_ino_num = get_sb(node_ino);
486	sbi->meta_ino_num = get_sb(meta_ino);
487	sbi->cur_victim_sec = NULL_SEGNO;
488
489	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
490	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
491				total_sectors, total_sectors >>
492						(20 - get_sb(log_sectorsize)));
493	return 0;
494}
495
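/*
 * Read the first and last blocks of the CP pack at cp_addr and verify
 * the CRC of each.  The pack is valid only when both copies carry the
 * same checkpoint_ver; on success the first page is returned and
 * *version is set, otherwise NULL is returned.
 */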
496void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
497				unsigned long long *version)
498{
499	void *cp_page_1, *cp_page_2;
500	struct f2fs_checkpoint *cp;
501	unsigned long blk_size = sbi->blocksize;
502	unsigned long long cur_version = 0, pre_version = 0;
503	unsigned int crc = 0;
504	size_t crc_offset;
505
506	/* Read the 1st cp block in this CP pack */
507	cp_page_1 = malloc(PAGE_SIZE);
508	if (dev_read_block(cp_page_1, cp_addr) < 0)
509		goto invalid_cp1;
510
511	cp = (struct f2fs_checkpoint *)cp_page_1;
512	crc_offset = get_cp(checksum_offset);
513	if (crc_offset >= blk_size)
514		goto invalid_cp1;
515
516	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
517	if (f2fs_crc_valid(crc, cp, crc_offset))
518		goto invalid_cp1;
519
520	pre_version = get_cp(checkpoint_ver);
521
522	/* Read the 2nd cp block in this CP pack */
523	cp_page_2 = malloc(PAGE_SIZE);
524	cp_addr += get_cp(cp_pack_total_block_count) - 1;
525
526	if (dev_read_block(cp_page_2, cp_addr) < 0)
527		goto invalid_cp2;
528
529	cp = (struct f2fs_checkpoint *)cp_page_2;
530	crc_offset = get_cp(checksum_offset);
531	if (crc_offset >= blk_size)
532		goto invalid_cp2;
533
534	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
535	if (f2fs_crc_valid(crc, cp, crc_offset))
536		goto invalid_cp2;
537
538	cur_version = get_cp(checkpoint_ver);
539
540	if (cur_version == pre_version) {
541		*version = cur_version;
542		free(cp_page_2);
543		return cp_page_1;
544	}
545
546invalid_cp2:
547	free(cp_page_2);
548invalid_cp1:
549	free(cp_page_1);
550	return NULL;
551}
552
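/*
 * Load both CP packs, keep whichever carries the newer checkpoint
 * version, and copy the trailing cp_payload blocks into sbi->ckpt.
 */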
553int get_valid_checkpoint(struct f2fs_sb_info *sbi)
554{
555	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
556	void *cp1, *cp2, *cur_page;
557	unsigned long blk_size = sbi->blocksize;
558	unsigned long long cp1_version = 0, cp2_version = 0, version;
559	unsigned long long cp_start_blk_no;
560	unsigned int cp_blks = 1 + get_sb(cp_payload);
561	int ret;
562
563	sbi->ckpt = malloc(cp_blks * blk_size);
564	if (!sbi->ckpt)
565		return -ENOMEM;
566	/*
567	 * Finding a valid cp block involves reading both
568	 * sets (cp pack 1 and cp pack 2)
569	 */
570	cp_start_blk_no = get_sb(cp_blkaddr);
571	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
572
573	/* The second checkpoint pack should start at the next segment */
574	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
575	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
576
577	if (cp1 && cp2) {
578		if (ver_after(cp2_version, cp1_version)) {
579			cur_page = cp2;
580			sbi->cur_cp = 2;
581			version = cp2_version;
582		} else {
583			cur_page = cp1;
584			sbi->cur_cp = 1;
585			version = cp1_version;
586		}
587	} else if (cp1) {
588		cur_page = cp1;
589		sbi->cur_cp = 1;
590		version = cp1_version;
591	} else if (cp2) {
592		cur_page = cp2;
593		sbi->cur_cp = 2;
594		version = cp2_version;
595	} else
596		goto fail_no_cp;
597
598	MSG(0, "Info: CKPT version = %llx\n", version);
599
600	memcpy(sbi->ckpt, cur_page, blk_size);
601
602	if (cp_blks > 1) {
603		unsigned int i;
604		unsigned long long cp_blk_no;
605
606		cp_blk_no = get_sb(cp_blkaddr);
607		if (cur_page == cp2)
608			cp_blk_no += 1 << get_sb(log_blocks_per_seg);
609
610		/* copy the sit bitmap carried in the cp payload blocks */
611		for (i = 1; i < cp_blks; i++) {
612			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
613			ret = dev_read_block(cur_page, cp_blk_no + i);
614			ASSERT(ret >= 0);
615			memcpy(ckpt + i * blk_size, cur_page, blk_size);
616		}
617	}
618	if (cp1)
619		free(cp1);
620	if (cp2)
621		free(cp2);
622	return 0;
623
624fail_no_cp:
625	free(sbi->ckpt);
626	sbi->ckpt = NULL;
627	return -EINVAL;
628}
629
630int sanity_check_ckpt(struct f2fs_sb_info *sbi)
631{
632	unsigned int total, fsmeta;
633	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
634	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
635
636	total = get_sb(segment_count);
637	fsmeta = get_sb(segment_count_ckpt);
638	fsmeta += get_sb(segment_count_sit);
639	fsmeta += get_sb(segment_count_nat);
640	fsmeta += get_cp(rsvd_segment_count);
641	fsmeta += get_sb(segment_count_ssa);
642
643	if (fsmeta >= total)
644		return 1;
645
646	return 0;
647}
648
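/*
 * Each NAT block is stored twice in the NAT area.  The per-block bit in
 * nat_bitmap selects which copy is current: the address computed below
 * points at the first copy and is bumped by one segment when the bit
 * is set.
 */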
649static pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
650{
651	struct f2fs_nm_info *nm_i = NM_I(sbi);
652	pgoff_t block_off;
653	pgoff_t block_addr;
654	int seg_off;
655
656	block_off = NAT_BLOCK_OFFSET(start);
657	seg_off = block_off >> sbi->log_blocks_per_seg;
658
659	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
660			(seg_off << sbi->log_blocks_per_seg << 1) +
661			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
662
663	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
664		block_addr += sbi->blocks_per_seg;
665
666	return block_addr;
667}
668
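/*
 * For sload, build an in-memory bitmap of allocated nids by scanning
 * every on-disk NAT block and then overlaying the NAT journal entries
 * cached in the hot data summary block.
 */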
669static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
670{
671	struct f2fs_nm_info *nm_i = NM_I(sbi);
672	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
673	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
674	struct f2fs_summary_block *sum = curseg->sum_blk;
675	struct f2fs_journal *journal = &sum->journal;
676	struct f2fs_nat_block nat_block;
677	block_t start_blk;
678	nid_t nid;
679	int i;
680
681	if (c.func != SLOAD)
682		return 0;
683
684	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
685	if (!nm_i->nid_bitmap)
686		return -ENOMEM;
687
688	/* nid 0 is never allocated; mark it as in use */
689	f2fs_set_bit(0, nm_i->nid_bitmap);
690
691	memset((void *)&nat_block, 0, sizeof(struct f2fs_nat_block));
692
693	for (nid = 0; nid < nm_i->max_nid; nid++) {
694		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
695			int ret;
696
697			start_blk = current_nat_addr(sbi, nid);
698			ret = dev_read_block((void *)&nat_block, start_blk);
699			ASSERT(ret >= 0);
700		}
701
702		if (nat_block.entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
703			f2fs_set_bit(nid, nm_i->nid_bitmap);
704	}
705
706	for (i = 0; i < nats_in_cursum(journal); i++) {
707		block_t addr;
708
709		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
710		nid = le32_to_cpu(nid_in_journal(journal, i));
711		if (addr != NULL_ADDR)
712			f2fs_set_bit(nid, nm_i->nid_bitmap);
713	}
714	return 0;
715}
716
717int init_node_manager(struct f2fs_sb_info *sbi)
718{
719	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
720	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
721	struct f2fs_nm_info *nm_i = NM_I(sbi);
722	unsigned char *version_bitmap;
723	unsigned int nat_segs, nat_blocks;
724
725	nm_i->nat_blkaddr = get_sb(nat_blkaddr);
726
727	/* segment_count_nat includes both NAT copies, so divide by 2. */
728	nat_segs = get_sb(segment_count_nat) >> 1;
729	nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
730	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
731	nm_i->fcnt = 0;
732	nm_i->nat_cnt = 0;
733	nm_i->init_scan_nid = get_cp(next_free_nid);
734	nm_i->next_scan_nid = get_cp(next_free_nid);
735
736	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
737
738	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
739	if (!nm_i->nat_bitmap)
740		return -ENOMEM;
741	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
742	if (!version_bitmap)
743		return -EFAULT;
744
745	/* copy version bitmap */
746	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
747	return f2fs_init_nid_bitmap(sbi);
748}
749
750int build_node_manager(struct f2fs_sb_info *sbi)
751{
752	int err;
753	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
754	if (!sbi->nm_info)
755		return -ENOMEM;
756
757	err = init_node_manager(sbi);
758	if (err)
759		return err;
760
761	return 0;
762}
763
764int build_sit_info(struct f2fs_sb_info *sbi)
765{
766	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
767	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
768	struct sit_info *sit_i;
769	unsigned int sit_segs, start;
770	char *src_bitmap, *dst_bitmap;
771	unsigned int bitmap_size;
772
773	sit_i = malloc(sizeof(struct sit_info));
774	if (!sit_i)
775		return -ENOMEM;
776
777	SM_I(sbi)->sit_info = sit_i;
778
779	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
780
781	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
782		sit_i->sentries[start].cur_valid_map
783			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
784		sit_i->sentries[start].ckpt_valid_map
785			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
786		if (!sit_i->sentries[start].cur_valid_map
787				|| !sit_i->sentries[start].ckpt_valid_map)
788			return -ENOMEM;
789	}
790
791	sit_segs = get_sb(segment_count_sit) >> 1;
792	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
793	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
794
795	dst_bitmap = malloc(bitmap_size);
796	memcpy(dst_bitmap, src_bitmap, bitmap_size);
797
798	sit_i->sit_base_addr = get_sb(sit_blkaddr);
799	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
800	sit_i->written_valid_blocks = get_cp(valid_block_count);
801	sit_i->sit_bitmap = dst_bitmap;
802	sit_i->bitmap_size = bitmap_size;
803	sit_i->dirty_sentries = 0;
804	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
805	sit_i->elapsed_time = get_cp(elapsed_time);
806	return 0;
807}
808
809void reset_curseg(struct f2fs_sb_info *sbi, int type)
810{
811	struct curseg_info *curseg = CURSEG_I(sbi, type);
812	struct summary_footer *sum_footer;
813	struct seg_entry *se;
814
815	sum_footer = &(curseg->sum_blk->footer);
816	memset(sum_footer, 0, sizeof(struct summary_footer));
817	if (IS_DATASEG(type))
818		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
819	if (IS_NODESEG(type))
820		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
821	se = get_seg_entry(sbi, curseg->segno);
822	se->type = type;
823}
824
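/*
 * A compacted summary pack stores the NAT and SIT journals in its first
 * block, followed by the summary entries of the three data cursegs
 * packed back to back across the remaining blocks.
 */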
825static void read_compacted_summaries(struct f2fs_sb_info *sbi)
826{
827	struct curseg_info *curseg;
828	unsigned int i, j, offset;
829	block_t start;
830	char *kaddr;
831	int ret;
832
833	start = start_sum_block(sbi);
834
835	kaddr = (char *)malloc(PAGE_SIZE);
836	ret = dev_read_block(kaddr, start++);
837	ASSERT(ret >= 0);
838
839	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
840	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
841
842	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
843	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
844						SUM_JOURNAL_SIZE);
845
846	offset = 2 * SUM_JOURNAL_SIZE;
847	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
848		unsigned short blk_off;
849		struct curseg_info *curseg = CURSEG_I(sbi, i);
850
851		reset_curseg(sbi, i);
852
853		if (curseg->alloc_type == SSR)
854			blk_off = sbi->blocks_per_seg;
855		else
856			blk_off = curseg->next_blkoff;
857
858		for (j = 0; j < blk_off; j++) {
859			struct f2fs_summary *s;
860			s = (struct f2fs_summary *)(kaddr + offset);
861			curseg->sum_blk->entries[j] = *s;
862			offset += SUMMARY_SIZE;
863			if (offset + SUMMARY_SIZE <=
864					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
865				continue;
866			memset(kaddr, 0, PAGE_SIZE);
867			ret = dev_read_block(kaddr, start++);
868			ASSERT(ret >= 0);
869			offset = 0;
870		}
871	}
872	free(kaddr);
873}
874
875static void restore_node_summary(struct f2fs_sb_info *sbi,
876		unsigned int segno, struct f2fs_summary_block *sum_blk)
877{
878	struct f2fs_node *node_blk;
879	struct f2fs_summary *sum_entry;
880	block_t addr;
881	unsigned int i;
882	int ret;
883
884	node_blk = malloc(F2FS_BLKSIZE);
885	ASSERT(node_blk);
886
887	/* scan the node segment */
888	addr = START_BLOCK(sbi, segno);
889	sum_entry = &sum_blk->entries[0];
890
891	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
892		ret = dev_read_block(node_blk, addr);
893		ASSERT(ret >= 0);
894		sum_entry->nid = node_blk->footer.nid;
895		addr++;
896	}
897	free(node_blk);
898}
899
900static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
901{
902	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
903	struct f2fs_summary_block *sum_blk;
904	struct curseg_info *curseg;
905	unsigned int segno = 0;
906	block_t blk_addr = 0;
907	int ret;
908
909	if (IS_DATASEG(type)) {
910		segno = get_cp(cur_data_segno[type]);
911		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
912			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
913		else
914			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
915	} else {
916		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
917		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
918			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
919							type - CURSEG_HOT_NODE);
920		else
921			blk_addr = GET_SUM_BLKADDR(sbi, segno);
922	}
923
924	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
925	ret = dev_read_block(sum_blk, blk_addr);
926	ASSERT(ret >= 0);
927
928	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
929		restore_node_summary(sbi, segno, sum_blk);
930
931	curseg = CURSEG_I(sbi, type);
932	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
933	reset_curseg(sbi, type);
934	free(sum_blk);
935}
936
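/*
 * Rewrite the summary entry that describes blk_addr.  The SSA block is
 * written back unless the segment type could not be resolved, and the
 * buffer is freed unless it is a curseg's in-memory summary block.
 */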
937void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
938					struct f2fs_summary *sum)
939{
940	struct f2fs_summary_block *sum_blk;
941	u32 segno, offset;
942	int type, ret;
943	struct seg_entry *se;
944
945	segno = GET_SEGNO(sbi, blk_addr);
946	offset = OFFSET_IN_SEG(sbi, blk_addr);
947
948	se = get_seg_entry(sbi, segno);
949
950	sum_blk = get_sum_block(sbi, segno, &type);
951	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
952	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
953							SUM_TYPE_DATA;
954
955	/* write SSA all the time */
956	if (type < SEG_TYPE_MAX) {
957		u64 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
958		ret = dev_write_block(sum_blk, ssa_blk);
959		ASSERT(ret >= 0);
960	}
961
962	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
963					type == SEG_TYPE_MAX)
964		free(sum_blk);
965}
966
967static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
968{
969	int type = CURSEG_HOT_DATA;
970
971	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
972		read_compacted_summaries(sbi);
973		type = CURSEG_HOT_NODE;
974	}
975
976	for (; type <= CURSEG_COLD_NODE; type++)
977		read_normal_summaries(sbi, type);
978}
979
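/*
 * Allocate the NR_CURSEG_TYPE current segment descriptors, seed their
 * segno, block offset and allocation type from the checkpoint, and
 * restore the matching summary blocks.
 */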
980static void build_curseg(struct f2fs_sb_info *sbi)
981{
982	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
983	struct curseg_info *array;
984	unsigned short blk_off;
985	unsigned int segno;
986	int i;
987
988	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
989	ASSERT(array);
990
991	SM_I(sbi)->curseg_array = array;
992
993	for (i = 0; i < NR_CURSEG_TYPE; i++) {
994		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
995		ASSERT(array[i].sum_blk);
996		if (i <= CURSEG_COLD_DATA) {
997			blk_off = get_cp(cur_data_blkoff[i]);
998			segno = get_cp(cur_data_segno[i]);
999		}
1000		if (i > CURSEG_COLD_DATA) {
1001			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
1002			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
1003		}
1004		array[i].segno = segno;
1005		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
1006		array[i].next_segno = NULL_SEGNO;
1007		array[i].next_blkoff = blk_off;
1008		array[i].alloc_type = cp->alloc_type[i];
1009	}
1010	restore_curseg_summaries(sbi);
1011}
1012
1013static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
1014{
1015	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
1016	ASSERT(segno <= end_segno);
1017}
1018
1019static struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
1020						unsigned int segno)
1021{
1022	struct sit_info *sit_i = SIT_I(sbi);
1023	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1024	block_t blk_addr = sit_i->sit_base_addr + offset;
1025	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
1026	int ret;
1027
1028	check_seg_range(sbi, segno);
1029
1030	/* calculate sit block address */
1031	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1032		blk_addr += sit_i->sit_blocks;
1033
1034	ret = dev_read_block(sit_blk, blk_addr);
1035	ASSERT(ret >= 0);
1036
1037	return sit_blk;
1038}
1039
1040void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
1041			unsigned int segno, struct f2fs_sit_block *sit_blk)
1042{
1043	struct sit_info *sit_i = SIT_I(sbi);
1044	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1045	block_t blk_addr = sit_i->sit_base_addr + offset;
1046	int ret;
1047
1048	/* calculate sit block address */
1049	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1050		blk_addr += sit_i->sit_blocks;
1051
1052	ret = dev_write_block(sit_blk, blk_addr);
1053	ASSERT(ret >= 0);
1054}
1055
1056void check_block_count(struct f2fs_sb_info *sbi,
1057		unsigned int segno, struct f2fs_sit_entry *raw_sit)
1058{
1059	struct f2fs_sm_info *sm_info = SM_I(sbi);
1060	unsigned int end_segno = sm_info->segment_count - 1;
1061	int valid_blocks = 0;
1062	unsigned int i;
1063
1064	/* check segment usage */
1065	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
1066		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
1067				segno, GET_SIT_VBLOCKS(raw_sit));
1068
1069	/* check boundary of a given segment number */
1070	if (segno > end_segno)
1071		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
1072
1073	/* check bitmap with valid block count */
1074	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1075		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
1076
1077	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
1078		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
1079				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
1080
1081	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
1082		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
1083				segno, GET_SIT_TYPE(raw_sit));
1084}
1085
1086void seg_info_from_raw_sit(struct seg_entry *se,
1087		struct f2fs_sit_entry *raw_sit)
1088{
1089	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1090	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1091	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1092	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1093	se->type = GET_SIT_TYPE(raw_sit);
1094	se->orig_type = GET_SIT_TYPE(raw_sit);
1095	se->mtime = le64_to_cpu(raw_sit->mtime);
1096}
1097
1098struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
1099		unsigned int segno)
1100{
1101	struct sit_info *sit_i = SIT_I(sbi);
1102	return &sit_i->sentries[segno];
1103}
1104
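/*
 * Return the summary block covering segno.  If the segment is one of
 * the current segments, the in-memory copy is returned (with a negative
 * *ret_type when its footer type contradicts the checkpoint); otherwise
 * a freshly read SSA block is returned and the caller must free it.
 */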
1105struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
1106				unsigned int segno, int *ret_type)
1107{
1108	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1109	struct f2fs_summary_block *sum_blk;
1110	struct curseg_info *curseg;
1111	int type, ret;
1112	u64 ssa_blk;
1113
1114	*ret_type = SEG_TYPE_MAX;
1115
1116	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
1117	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
1118		if (segno == get_cp(cur_node_segno[type])) {
1119			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
1120			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
1121				ASSERT_MSG("segno [0x%x] indicates a data "
1122						"segment, but should be node",
1123						segno);
1124				*ret_type = -SEG_TYPE_CUR_NODE;
1125			} else {
1126				*ret_type = SEG_TYPE_CUR_NODE;
1127			}
1128			return curseg->sum_blk;
1129		}
1130	}
1131
1132	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
1133		if (segno == get_cp(cur_data_segno[type])) {
1134			curseg = CURSEG_I(sbi, type);
1135			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
1136				ASSERT_MSG("segno [0x%x] indicates a node "
1137						"segment, but should be data",
1138						segno);
1139				*ret_type = -SEG_TYPE_CUR_DATA;
1140			} else {
1141				*ret_type = SEG_TYPE_CUR_DATA;
1142			}
1143			return curseg->sum_blk;
1144		}
1145	}
1146
1147	sum_blk = calloc(BLOCK_SZ, 1);
1148	ASSERT(sum_blk);
1149
1150	ret = dev_read_block(sum_blk, ssa_blk);
1151	ASSERT(ret >= 0);
1152
1153	if (IS_SUM_NODE_SEG(sum_blk->footer))
1154		*ret_type = SEG_TYPE_NODE;
1155	else if (IS_SUM_DATA_SEG(sum_blk->footer))
1156		*ret_type = SEG_TYPE_DATA;
1157
1158	return sum_blk;
1159}
1160
1161int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
1162				struct f2fs_summary *sum_entry)
1163{
1164	struct f2fs_summary_block *sum_blk;
1165	u32 segno, offset;
1166	int type;
1167
1168	segno = GET_SEGNO(sbi, blk_addr);
1169	offset = OFFSET_IN_SEG(sbi, blk_addr);
1170
1171	sum_blk = get_sum_block(sbi, segno, &type);
1172	memcpy(sum_entry, &(sum_blk->entries[offset]),
1173				sizeof(struct f2fs_summary));
1174	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1175					type == SEG_TYPE_MAX)
1176		free(sum_blk);
1177	return type;
1178}
1179
1180static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
1181				struct f2fs_nat_entry *raw_nat)
1182{
1183	struct f2fs_nat_block *nat_block;
1184	pgoff_t block_addr;
1185	int entry_off;
1186	int ret;
1187
1188	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
1189		return;
1190
1191	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1192
1193	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1194	block_addr = current_nat_addr(sbi, nid);
1195
1196	ret = dev_read_block(nat_block, block_addr);
1197	ASSERT(ret >= 0);
1198
1199	memcpy(raw_nat, &nat_block->entries[entry_off],
1200					sizeof(struct f2fs_nat_entry));
1201	free(nat_block);
1202}
1203
1204void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
1205				u16 ofs_in_node, block_t newaddr)
1206{
1207	struct f2fs_node *node_blk = NULL;
1208	struct node_info ni;
1209	block_t oldaddr, startaddr, endaddr;
1210	int ret;
1211
1212	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
1213	ASSERT(node_blk != NULL);
1214
1215	get_node_info(sbi, nid, &ni);
1216
1217	/* read node_block */
1218	ret = dev_read_block(node_blk, ni.blk_addr);
1219	ASSERT(ret >= 0);
1220
1221	/* check its block address */
1222	if (node_blk->footer.nid == node_blk->footer.ino) {
1223		oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs_in_node]);
1224		node_blk->i.i_addr[ofs_in_node] = cpu_to_le32(newaddr);
1225	} else {
1226		oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
1227		node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
1228	}
1229
1230	ret = dev_write_block(node_blk, ni.blk_addr);
1231	ASSERT(ret >= 0);
1232
1233	/* check extent cache entry */
1234	if (node_blk->footer.nid != node_blk->footer.ino) {
1235		get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
1236
1237		/* read inode block */
1238		ret = dev_read_block(node_blk, ni.blk_addr);
1239		ASSERT(ret >= 0);
1240	}
1241
1242	startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
1243	endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
1244	if (oldaddr >= startaddr && oldaddr < endaddr) {
1245		node_blk->i.i_ext.len = 0;
1246
1247		/* update inode block */
1248		ret = dev_write_block(node_blk, ni.blk_addr);
1249		ASSERT(ret >= 0);
1250	}
1251	free(node_blk);
1252}
1253
1254void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
1255					nid_t nid, block_t newaddr)
1256{
1257	struct f2fs_nat_block *nat_block;
1258	pgoff_t block_addr;
1259	int entry_off;
1260	int ret;
1261
1262	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1263
1264	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1265	block_addr = current_nat_addr(sbi, nid);
1266
1267	ret = dev_read_block(nat_block, block_addr);
1268	ASSERT(ret >= 0);
1269
1270	if (ino)
1271		nat_block->entries[entry_off].ino = cpu_to_le32(ino);
1272	nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
1273
1274	ret = dev_write_block(nat_block, block_addr);
1275	ASSERT(ret >= 0);
1276	free(nat_block);
1277}
1278
1279void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
1280{
1281	struct f2fs_nat_entry raw_nat;
1282	get_nat_entry(sbi, nid, &raw_nat);
1283	ni->nid = nid;
1284	node_info_from_raw_nat(ni, &raw_nat);
1285}
1286
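/*
 * Populate the in-memory seg_entry array from the on-disk SIT blocks,
 * then let the SIT journal in the cold data summary override the
 * entries it caches.
 */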
1287void build_sit_entries(struct f2fs_sb_info *sbi)
1288{
1289	struct sit_info *sit_i = SIT_I(sbi);
1290	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1291	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1292	struct seg_entry *se;
1293	struct f2fs_sit_entry sit;
1294	unsigned int i, segno;
1295
1296	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1297		se = &sit_i->sentries[segno];
1298		struct f2fs_sit_block *sit_blk;
1299
1300		sit_blk = get_current_sit_page(sbi, segno);
1301		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1302		free(sit_blk);
1303
1304		check_block_count(sbi, segno, &sit);
1305		seg_info_from_raw_sit(se, &sit);
1306	}
1307
1308	for (i = 0; i < sits_in_cursum(journal); i++) {
1309		segno = le32_to_cpu(segno_in_journal(journal, i));
1310		se = &sit_i->sentries[segno];
1311		sit = sit_in_journal(journal, i);
1312
1313		check_block_count(sbi, segno, &sit);
1314		seg_info_from_raw_sit(se, &sit);
1315	}
1316
1317}
1318
1319int build_segment_manager(struct f2fs_sb_info *sbi)
1320{
1321	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1322	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1323	struct f2fs_sm_info *sm_info;
1324
1325	sm_info = malloc(sizeof(struct f2fs_sm_info));
1326	if (!sm_info)
1327		return -ENOMEM;
1328
1329	/* init sm info */
1330	sbi->sm_info = sm_info;
1331	sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
1332	sm_info->main_blkaddr = get_sb(main_blkaddr);
1333	sm_info->segment_count = get_sb(segment_count);
1334	sm_info->reserved_segments = get_cp(rsvd_segment_count);
1335	sm_info->ovp_segments = get_cp(overprov_segment_count);
1336	sm_info->main_segments = get_sb(segment_count_main);
1337	sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
1338
1339	build_sit_info(sbi);
1340
1341	build_curseg(sbi);
1342
1343	build_sit_entries(sbi);
1344
1345	return 0;
1346}
1347
1348void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
1349{
1350	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1351	struct f2fs_sm_info *sm_i = SM_I(sbi);
1352	unsigned int segno = 0;
1353	char *ptr = NULL;
1354	u32 sum_vblocks = 0;
1355	u32 free_segs = 0;
1356	struct seg_entry *se;
1357
1358	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
1359	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
1360	ptr = fsck->sit_area_bitmap;
1361
1362	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
1363
1364	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1365		se = get_seg_entry(sbi, segno);
1366
1367		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1368		ptr += SIT_VBLOCK_MAP_SIZE;
1369
1370		if (se->valid_blocks == 0x0) {
1371			if (sbi->ckpt->cur_node_segno[0] == segno ||
1372					sbi->ckpt->cur_data_segno[0] == segno ||
1373					sbi->ckpt->cur_node_segno[1] == segno ||
1374					sbi->ckpt->cur_data_segno[1] == segno ||
1375					sbi->ckpt->cur_node_segno[2] == segno ||
1376					sbi->ckpt->cur_data_segno[2] == segno) {
1377				continue;
1378			} else {
1379				free_segs++;
1380			}
1381		} else {
1382			sum_vblocks += se->valid_blocks;
1383		}
1384	}
1385	fsck->chk.sit_valid_blocks = sum_vblocks;
1386	fsck->chk.sit_free_segs = free_segs;
1387
1388	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
1389			sum_vblocks, sum_vblocks,
1390			free_segs, free_segs);
1391}
1392
1393void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
1394{
1395	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1396	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1397	struct sit_info *sit_i = SIT_I(sbi);
1398	unsigned int segno = 0;
1399	struct f2fs_summary_block *sum = curseg->sum_blk;
1400	char *ptr = NULL;
1401
1402	/* remove sit journal */
1403	sum->journal.n_sits = 0;
1404
1405	fsck->chk.free_segs = 0;
1406
1407	ptr = fsck->main_area_bitmap;
1408
1409	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1410		struct f2fs_sit_block *sit_blk;
1411		struct f2fs_sit_entry *sit;
1412		struct seg_entry *se;
1413		u16 valid_blocks = 0;
1414		u16 type;
1415		int i;
1416
1417		sit_blk = get_current_sit_page(sbi, segno);
1418		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1419		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
1420
1421		/* update valid block count */
1422		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1423			valid_blocks += get_bits_in_byte(sit->valid_map[i]);
1424
1425		se = get_seg_entry(sbi, segno);
1426		type = se->type;
1427		if (type >= NO_CHECK_TYPE) {
1428			ASSERT_MSG("Invalid type: segno=0x%x, valid_blocks=0x%x",
1429					segno, valid_blocks);
1430			type = 0;
1431		}
1432		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
1433								valid_blocks);
1434		rewrite_current_sit_page(sbi, segno, sit_blk);
1435		free(sit_blk);
1436
1437		if (valid_blocks == 0 &&
1438				sbi->ckpt->cur_node_segno[0] != segno &&
1439				sbi->ckpt->cur_data_segno[0] != segno &&
1440				sbi->ckpt->cur_node_segno[1] != segno &&
1441				sbi->ckpt->cur_data_segno[1] != segno &&
1442				sbi->ckpt->cur_node_segno[2] != segno &&
1443				sbi->ckpt->cur_data_segno[2] != segno)
1444			fsck->chk.free_segs++;
1445
1446		ptr += SIT_VBLOCK_MAP_SIZE;
1447	}
1448}
1449
1450static void flush_sit_journal_entries(struct f2fs_sb_info *sbi)
1451{
1452	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1453	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1454	struct sit_info *sit_i = SIT_I(sbi);
1455	unsigned int segno;
1456	int i;
1457
1458	for (i = 0; i < sits_in_cursum(journal); i++) {
1459		struct f2fs_sit_block *sit_blk;
1460		struct f2fs_sit_entry *sit;
1461		struct seg_entry *se;
1462
1463		segno = segno_in_journal(journal, i);
1464		se = get_seg_entry(sbi, segno);
1465
1466		sit_blk = get_current_sit_page(sbi, segno);
1467		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1468
1469		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1470		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1471							se->valid_blocks);
1472		sit->mtime = cpu_to_le64(se->mtime);
1473
1474		rewrite_current_sit_page(sbi, segno, sit_blk);
1475		free(sit_blk);
1476	}
1477	journal->n_sits = 0;
1478}
1479
1480static void flush_nat_journal_entries(struct f2fs_sb_info *sbi)
1481{
1482	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1483	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1484	struct f2fs_nat_block *nat_block;
1485	pgoff_t block_addr;
1486	int entry_off;
1487	nid_t nid;
1488	int ret;
1489	int i = 0;
1490
1491next:
1492	if (i >= nats_in_cursum(journal)) {
1493		journal->n_nats = 0;
1494		return;
1495	}
1496
1497	nid = le32_to_cpu(nid_in_journal(journal, i));
1498	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1499
1500	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1501	block_addr = current_nat_addr(sbi, nid);
1502
1503	ret = dev_read_block(nat_block, block_addr);
1504	ASSERT(ret >= 0);
1505
1506	memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
1507					sizeof(struct f2fs_nat_entry));
1508
1509	ret = dev_write_block(nat_block, block_addr);
1510	ASSERT(ret >= 0);
1511	free(nat_block);
1512	i++;
1513	goto next;
1514}
1515
1516void flush_journal_entries(struct f2fs_sb_info *sbi)
1517{
1518	flush_nat_journal_entries(sbi);
1519	flush_sit_journal_entries(sbi);
1520	write_checkpoint(sbi);
1521}
1522
1523void flush_sit_entries(struct f2fs_sb_info *sbi)
1524{
1525	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1526	struct sit_info *sit_i = SIT_I(sbi);
1527	unsigned int segno = 0;
1528	u32 free_segs = 0;
1529
1530	/* update free segments */
1531	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1532		struct f2fs_sit_block *sit_blk;
1533		struct f2fs_sit_entry *sit;
1534		struct seg_entry *se;
1535
1536		se = get_seg_entry(sbi, segno);
1537
1538		if (!se->dirty)
1539			continue;
1540
1541		sit_blk = get_current_sit_page(sbi, segno);
1542		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1543		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1544		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1545							se->valid_blocks);
1546		rewrite_current_sit_page(sbi, segno, sit_blk);
1547		free(sit_blk);
1548
1549		if (se->valid_blocks == 0x0 &&
1550				!IS_CUR_SEGNO(sbi, segno, NO_CHECK_TYPE))
1551			free_segs++;
1552	}
1553
1554	set_cp(free_segment_count, free_segs);
1555}
1556
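/*
 * Starting from *to, scan left or right inside the main area for a free
 * block that can host an SSR write of the given type, skipping full
 * segments and the current segments.  Returns 0 with *to updated, or -1
 * when the free segment count is too low or no candidate is found.
 */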
1557int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left, int type)
1558{
1559	struct seg_entry *se;
1560	u32 segno;
1561	u64 offset;
1562
1563	if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
1564		return -1;
1565
1566	while (*to >= SM_I(sbi)->main_blkaddr &&
1567			*to < F2FS_RAW_SUPER(sbi)->block_count) {
1568		segno = GET_SEGNO(sbi, *to);
1569		offset = OFFSET_IN_SEG(sbi, *to);
1570
1571		se = get_seg_entry(sbi, segno);
1572
1573		if (se->valid_blocks == sbi->blocks_per_seg ||
1574				IS_CUR_SEGNO(sbi, segno, type)) {
1575			*to = left ? START_BLOCK(sbi, segno) - 1:
1576						START_BLOCK(sbi, segno + 1);
1577			continue;
1578		}
1579		if (se->valid_blocks == 0 && !(segno % sbi->segs_per_sec)) {
1580			struct seg_entry *se2;
1581			unsigned int i;
1582
1583			for (i = 1; i < sbi->segs_per_sec; i++) {
1584				se2 = get_seg_entry(sbi, segno + i);
1585				if (se2->valid_blocks)
1586					break;
1587			}
1588			if (i == sbi->segs_per_sec)
1589				return 0;
1590		}
1591
1592		if (se->type == type &&
1593			!f2fs_test_bit(offset, (const char *)se->cur_valid_map))
1594			return 0;
1595
1596		*to = left ? *to - 1: *to + 1;
1597	}
1598	return -1;
1599}
1600
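/*
 * Move every current segment to a free location at or after 'from':
 * flush the old summary to its SSA block, pick a new position via
 * find_next_free_block(), reload the SSA block found there and switch
 * the curseg to SSR allocation.
 */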
1601void move_curseg_info(struct f2fs_sb_info *sbi, u64 from)
1602{
1603	int i, ret;
1604
1605	/* move each current segment to a free slot at or after 'from' */
1606	for (i = 0; i < NO_CHECK_TYPE; i++) {
1607		struct curseg_info *curseg = CURSEG_I(sbi, i);
1608		struct f2fs_summary_block buf;
1609		u32 old_segno;
1610		u64 ssa_blk, to;
1611
1612		/* update original SSA too */
1613		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
1614		ret = dev_write_block(curseg->sum_blk, ssa_blk);
1615		ASSERT(ret >= 0);
1616
1617		to = from;
1618		ret = find_next_free_block(sbi, &to, 0, i);
1619		ASSERT(ret == 0);
1620
1621		old_segno = curseg->segno;
1622		curseg->segno = GET_SEGNO(sbi, to);
1623		curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
1624		curseg->alloc_type = SSR;
1625
1626		/* update new segno */
1627		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
1628		ret = dev_read_block(&buf, ssa_blk);
1629		ASSERT(ret >= 0);
1630
1631		memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
1632
1633		/* update se->types */
1634		reset_curseg(sbi, i);
1635
1636		DBG(1, "Move curseg[%d] %x -> %x after %"PRIx64"\n",
1637				i, old_segno, curseg->segno, from);
1638	}
1639}
1640
1641void zero_journal_entries(struct f2fs_sb_info *sbi)
1642{
1643	int i;
1644
1645	for (i = 0; i < NO_CHECK_TYPE; i++)
1646		CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
1647}
1648
1649void write_curseg_info(struct f2fs_sb_info *sbi)
1650{
1651	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1652	int i;
1653
1654	for (i = 0; i < NO_CHECK_TYPE; i++) {
1655		cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
1656		if (i < CURSEG_HOT_NODE) {
1657			set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
1658			set_cp(cur_data_blkoff[i],
1659					CURSEG_I(sbi, i)->next_blkoff);
1660		} else {
1661			int n = i - CURSEG_HOT_NODE;
1662
1663			set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
1664			set_cp(cur_node_blkoff[n],
1665					CURSEG_I(sbi, i)->next_blkoff);
1666		}
1667	}
1668}
1669
1670int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
1671					struct f2fs_nat_entry *raw_nat)
1672{
1673	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1674	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1675	int i = 0;
1676
1677	for (i = 0; i < nats_in_cursum(journal); i++) {
1678		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1679			memcpy(raw_nat, &nat_in_journal(journal, i),
1680						sizeof(struct f2fs_nat_entry));
1681			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
1682			return i;
1683		}
1684	}
1685	return -1;
1686}
1687
1688void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
1689{
1690	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1691	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1692	struct f2fs_nat_block *nat_block;
1693	pgoff_t block_addr;
1694	int entry_off;
1695	int ret;
1696	int i = 0;
1697
1698	/* check in journal */
1699	for (i = 0; i < nats_in_cursum(journal); i++) {
1700		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1701			memset(&nat_in_journal(journal, i), 0,
1702					sizeof(struct f2fs_nat_entry));
1703			FIX_MSG("Remove nid [0x%x] in nat journal\n", nid);
1704			return;
1705		}
1706	}
1707	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1708
1709	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1710	block_addr = current_nat_addr(sbi, nid);
1711
1712	ret = dev_read_block(nat_block, block_addr);
1713	ASSERT(ret >= 0);
1714
1715	memset(&nat_block->entries[entry_off], 0,
1716					sizeof(struct f2fs_nat_entry));
1717
1718	ret = dev_write_block(nat_block, block_addr);
1719	ASSERT(ret >= 0);
1720	free(nat_block);
1721}
1722
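/*
 * Rewrite the currently selected CP pack in place: the head cp block,
 * then (skipping the untouched payload and orphan blocks) the curseg
 * summary blocks, each also mirrored to its SSA slot, and finally the
 * trailing cp block.
 */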
1723void write_checkpoint(struct f2fs_sb_info *sbi)
1724{
1725	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1726	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1727	block_t orphan_blks = 0;
1728	unsigned long long cp_blk_no;
1729	u32 flags = CP_UMOUNT_FLAG;
1730	int i, ret;
1731	u_int32_t crc = 0;
1732
1733	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
1734		orphan_blks = __start_sum_addr(sbi) - 1;
1735		flags |= CP_ORPHAN_PRESENT_FLAG;
1736	}
1737
1738	set_cp(ckpt_flags, flags);
1739
1740	set_cp(free_segment_count, get_free_segments(sbi));
1741	set_cp(valid_block_count, sbi->total_valid_block_count);
1742	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
1743
1744	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
1745	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) = cpu_to_le32(crc);
1746
1747	cp_blk_no = get_sb(cp_blkaddr);
1748	if (sbi->cur_cp == 2)
1749		cp_blk_no += 1 << get_sb(log_blocks_per_seg);
1750
1751	/* write the first cp */
1752	ret = dev_write_block(cp, cp_blk_no++);
1753	ASSERT(ret >= 0);
1754
1755	/* skip payload */
1756	cp_blk_no += get_sb(cp_payload);
1757	/* skip orphan blocks */
1758	cp_blk_no += orphan_blks;
1759
1760	/* update summary blocks having nullified journal entries */
1761	for (i = 0; i < NO_CHECK_TYPE; i++) {
1762		struct curseg_info *curseg = CURSEG_I(sbi, i);
1763		u64 ssa_blk;
1764
1765		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
1766		ASSERT(ret >= 0);
1767
1768		/* update original SSA too */
1769		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
1770		ret = dev_write_block(curseg->sum_blk, ssa_blk);
1771		ASSERT(ret >= 0);
1772	}
1773
1774	/* write the last cp */
1775	ret = dev_write_block(cp, cp_blk_no++);
1776	ASSERT(ret >= 0);
1777}
1778
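/*
 * Walk every on-disk NAT block (preferring journaled entries) to build
 * fsck's bitmap of nids whose block address is non-zero, cache the raw
 * entries in fsck->entries, and repair the fixed 0x1 block address of
 * the node/meta inodes when it is wrong.
 */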
1779void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
1780{
1781	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1782	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1783	struct f2fs_nm_info *nm_i = NM_I(sbi);
1784	struct f2fs_nat_block *nat_block;
1785	u32 nid, nr_nat_blks;
1786	pgoff_t block_off;
1787	pgoff_t block_addr;
1788	int seg_off;
1789	int ret;
1790	unsigned int i;
1791
1792	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1793	ASSERT(nat_block);
1794
1795	/* Alloc & build nat entry bitmap */
1796	nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
1797					sbi->log_blocks_per_seg;
1798
1799	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
1800	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
1801	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
1802	ASSERT(fsck->nat_area_bitmap != NULL);
1803
1804	fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
1805					fsck->nr_nat_entries);
1806	ASSERT(fsck->entries);
1807
1808	for (block_off = 0; block_off < nr_nat_blks; block_off++) {
1809
1810		seg_off = block_off >> sbi->log_blocks_per_seg;
1811		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
1812			(seg_off << sbi->log_blocks_per_seg << 1) +
1813			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
1814
1815		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
1816			block_addr += sbi->blocks_per_seg;
1817
1818		ret = dev_read_block(nat_block, block_addr);
1819		ASSERT(ret >= 0);
1820
1821		nid = block_off * NAT_ENTRY_PER_BLOCK;
1822		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
1823			struct f2fs_nat_entry raw_nat;
1824			struct node_info ni;
1825			ni.nid = nid + i;
1826
1827			if ((nid + i) == F2FS_NODE_INO(sbi) ||
1828					(nid + i) == F2FS_META_INO(sbi)) {
1829				/* block_addr of node/meta inode should be 0x1 */
1830				if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
1831					FIX_MSG("ino: 0x%x node/meta inode, block_addr= 0x%x -> 0x1",
1832							nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
1833					nat_block->entries[i].block_addr = cpu_to_le32(0x1);
1834					ret = dev_write_block(nat_block, block_addr);
1835					ASSERT(ret >= 0);
1836				}
1837				continue;
1838			}
1839
1840			if (lookup_nat_in_journal(sbi, nid + i,
1841							&raw_nat) >= 0) {
1842				node_info_from_raw_nat(&ni, &raw_nat);
1843				if ((ni.ino == 0x0 && ni.blk_addr != 0x0))
1844					ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
1845						" is invalid\n",
1846						ni.ino, ni.blk_addr);
1847				if (ni.ino == (nid + i) && ni.blk_addr != 0) {
1848					fsck->nat_valid_inode_cnt++;
1849				DBG(3, "ino[0x%8x] may be an inode\n",
1850								ni.ino);
1851				}
1852				if (ni.blk_addr != 0x0) {
1853					f2fs_set_bit(nid + i,
1854							fsck->nat_area_bitmap);
1855					fsck->chk.valid_nat_entry_cnt++;
1856					DBG(3, "nid[0x%x] in nat cache\n",
1857								nid + i);
1858				}
1859
1860				fsck->entries[nid + i] = raw_nat;
1861			} else {
1862				node_info_from_raw_nat(&ni,
1863						&nat_block->entries[i]);
1864				if ((ni.ino == 0x0 && ni.blk_addr != 0x0))
1865					ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
1866						" is invalid\n",
1867						ni.ino, ni.blk_addr);
1868				if (ni.ino == (nid + i) && ni.blk_addr != 0) {
1869					fsck->nat_valid_inode_cnt++;
1870				DBG(3, "ino[0x%8x] may be an inode\n",
1871								ni.ino);
1872				}
1873				if (ni.blk_addr == 0)
1874					continue;
1875				if (nid + i == 0) {
1876					/*
1877					 * nat entry [0] must be null.  If
1878					 * it is corrupted, set its bit in
1879					 * nat_area_bitmap, fsck_verify will
1880					 * nullify it
1881					 */
1882					ASSERT_MSG("Invalid nat entry[0]: "
1883						"blk_addr[0x%x]\n",
1884						ni.blk_addr);
1885					c.fix_on = 1;
1886					fsck->chk.valid_nat_entry_cnt--;
1887				}
1888
1889				DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
1890					nid + i, ni.blk_addr, ni.ino);
1891				f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
1892				fsck->chk.valid_nat_entry_cnt++;
1893
1894				fsck->entries[nid + i] = nat_block->entries[i];
1895			}
1896		}
1897	}
1898	free(nat_block);
1899
1900	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
1901			fsck->chk.valid_nat_entry_cnt,
1902			fsck->chk.valid_nat_entry_cnt);
1903}
1904
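/*
 * If the device sector geometry differs from what the superblock
 * records, update log_sectorsize/log_sectors_per_block and rewrite
 * both superblock copies.
 */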
1905static int check_sector_size(struct f2fs_super_block *sb)
1906{
1907	int index;
1908	u_int32_t log_sectorsize, log_sectors_per_block;
1909	u_int8_t *zero_buff;
1910
1911	log_sectorsize = log_base_2(c.sector_size);
1912	log_sectors_per_block = log_base_2(c.sectors_per_blk);
1913
1914	if (log_sectorsize == get_sb(log_sectorsize) &&
1915			log_sectors_per_block == get_sb(log_sectors_per_block))
1916		return 0;
1917
1918	zero_buff = calloc(F2FS_BLKSIZE, 1);
1919	ASSERT(zero_buff);
1920
1921	set_sb(log_sectorsize, log_sectorsize);
1922	set_sb(log_sectors_per_block, log_sectors_per_block);
1923
1924	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
1925	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
1926	for (index = 0; index < 2; index++) {
1927		if (dev_write(zero_buff, index * F2FS_BLKSIZE, F2FS_BLKSIZE)) {
1928			MSG(1, "\tError: Failed while writing super block "
1929				"on disk!!! index : %d\n", index);
1930			free(zero_buff);
1931			return -1;
1932		}
1933	}
1934
1935	free(zero_buff);
1936	return 0;
1937}
1938
1939int f2fs_do_mount(struct f2fs_sb_info *sbi)
1940{
1941	struct f2fs_checkpoint *cp = NULL;
1942	int ret;
1943
1944	sbi->active_logs = NR_CURSEG_TYPE;
1945	ret = validate_super_block(sbi, 0);
1946	if (ret) {
1947		ret = validate_super_block(sbi, 1);
1948		if (ret)
1949			return -1;
1950	}
1951
1952	ret = check_sector_size(sbi->raw_super);
1953	if (ret)
1954		return -1;
1955
1956	print_raw_sb_info(F2FS_RAW_SUPER(sbi));
1957
1958	init_sb_info(sbi);
1959
1960	ret = get_valid_checkpoint(sbi);
1961	if (ret) {
1962		ERR_MSG("Can't find valid checkpoint\n");
1963		return -1;
1964	}
1965
1966	if (sanity_check_ckpt(sbi)) {
1967		ERR_MSG("Checkpoint is polluted\n");
1968		return -1;
1969	}
1970	cp = F2FS_CKPT(sbi);
1971
1972	print_ckpt_info(sbi);
1973
1974	if (c.auto_fix || c.preen_mode) {
1975		u32 flag = get_cp(ckpt_flags);
1976
1977		if (flag & CP_FSCK_FLAG)
1978			c.fix_on = 1;
1979		else if (!c.preen_mode)
1980			return 1;
1981	}
1982
1983	c.bug_on = 0;
1984
1985	sbi->total_valid_node_count = get_cp(valid_node_count);
1986	sbi->total_valid_inode_count = get_cp(valid_inode_count);
1987	sbi->user_block_count = get_cp(user_block_count);
1988	sbi->total_valid_block_count = get_cp(valid_block_count);
1989	sbi->last_valid_block_count = sbi->total_valid_block_count;
1990	sbi->alloc_valid_block_count = 0;
1991
1992	if (build_segment_manager(sbi)) {
1993		ERR_MSG("build_segment_manager failed\n");
1994		return -1;
1995	}
1996
1997	if (build_node_manager(sbi)) {
1998		ERR_MSG("build_node_manager failed\n");
1999		return -1;
2000	}
2001
2002	return 0;
2003}
2004
2005void f2fs_do_umount(struct f2fs_sb_info *sbi)
2006{
2007	struct sit_info *sit_i = SIT_I(sbi);
2008	struct f2fs_sm_info *sm_i = SM_I(sbi);
2009	struct f2fs_nm_info *nm_i = NM_I(sbi);
2010	unsigned int i;
2011
2012	/* free nm_info */
2013	if (c.func == SLOAD)
2014		free(nm_i->nid_bitmap);
2015	free(nm_i->nat_bitmap);
2016	free(sbi->nm_info);
2017
2018	/* free sit_info */
2019	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
2020		free(sit_i->sentries[i].cur_valid_map);
2021		free(sit_i->sentries[i].ckpt_valid_map);
2022	}
2023	free(sit_i->sit_bitmap);
2024	free(sm_i->sit_info);
2025
2026	/* free sm_info */
2027	for (i = 0; i < NR_CURSEG_TYPE; i++)
2028		free(sm_i->curseg_array[i].sum_blk);
2029
2030	free(sm_i->curseg_array);
2031	free(sbi->sm_info);
2032
2033	free(sbi->ckpt);
2034	free(sbi->raw_super);
2035}
2036