mount.c revision 2010f975eefe4bb74623a0699527bea4ba726c06
1/**
2 * mount.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 *             http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include "fsck.h"
12#include <locale.h>
13
14u32 get_free_segments(struct f2fs_sb_info *sbi)
15{
16	u32 i, free_segs = 0;
17
18	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
19		struct seg_entry *se = get_seg_entry(sbi, i);
20
21		if (se->valid_blocks == 0x0 &&
22				!IS_CUR_SEGNO(sbi, i, NO_CHECK_TYPE))
23			free_segs++;
24	}
25	return free_segs;
26}
27
28void update_free_segments(struct f2fs_sb_info *sbi)
29{
30	char *progress = "-*|*-";
31	static int i = 0;
32
33	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
34	fflush(stdout);
35	i++;
36}
37
/*
 * Dump an on-disk inode.  When @name is non-zero and the inode carries a
 * name, only a short summary (name + size) is printed; otherwise every
 * raw field is dumped via the DISP_* macros.
 */
void print_inode_info(struct f2fs_inode *inode, int name)
{
	unsigned char en[F2FS_NAME_LEN + 1];
	unsigned int i = 0;
	int namelen = le32_to_cpu(inode->i_namelen);
	int is_encrypt = file_is_encrypt(inode);

	/* Decode the (possibly encrypted) name into printable form;
	 * convert_encrypted_name() returns the length actually produced. */
	namelen = convert_encrypted_name(inode->i_name, namelen, en, is_encrypt);
	en[namelen] = '\0';
	if (name && namelen) {
		/* Short form: file name and size only. */
		inode->i_name[namelen] = '\0';
		MSG(0, " - File name         : %s%s\n", en,
				is_encrypt ? " <encrypted>" : "");
		/* Honor the user's locale so %'llu groups digits. */
		setlocale(LC_ALL, "");
		MSG(0, " - File size         : %'llu (bytes)\n",
				le64_to_cpu(inode->i_size));
		return;
	}

	/* Verbose form: dump every on-disk inode field. */
	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_advise);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);
	DISP_u32(inode, i_dir_level);

	if (namelen) {
		DISP_u32(inode, i_namelen);
		printf("%-30s\t\t[%s]\n", "i_name", en);
	}

	/* Cached extent of contiguous data blocks. */
	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	DISP_u32(inode, i_addr[0]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[1]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[2]);	/* Pointers to data blocks */
	DISP_u32(inode, i_addr[3]);	/* Pointers to data blocks */

	/* Beyond the first four slots, report only the first in-use
	 * direct pointer to keep the dump short. */
	for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
		if (inode->i_addr[i] != 0x0) {
			printf("i_addr[0x%x] points data block\r\t\t[0x%4x]\n",
					i, le32_to_cpu(inode->i_addr[i]));
			break;
		}
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	printf("\n");
}
111
112void print_node_info(struct f2fs_node *node_block, int verbose)
113{
114	nid_t ino = le32_to_cpu(node_block->footer.ino);
115	nid_t nid = le32_to_cpu(node_block->footer.nid);
116	/* Is this inode? */
117	if (ino == nid) {
118		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
119		print_inode_info(&node_block->i, verbose);
120	} else {
121		int i;
122		u32 *dump_blk = (u32 *)node_block;
123		DBG(verbose,
124			"Node ID [0x%x:%u] is direct node or indirect node.\n",
125								nid, nid);
126		for (i = 0; i <= 10; i++)
127			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
128						i, dump_blk[i], dump_blk[i]);
129	}
130}
131
132static void DISP_label(u_int16_t *name)
133{
134	char buffer[MAX_VOLUME_NAME];
135
136	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
137	printf("%-30s" "\t\t[%s]\n", "volum_name", buffer);
138}
139
/*
 * Dump every field of the raw superblock.  Only active when a debug
 * level has been requested (c.dbg_lv).
 */
void print_raw_sb_info(struct f2fs_super_block *sb)
{
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);

	DISP_label(sb->volume_name);

	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	/* start addresses of the on-disk metadata areas */
	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	/* reserved inode numbers */
	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	DISP("%s", sb, version);
	printf("\n");
}
189
/*
 * Dump every field of the currently-loaded checkpoint.  Only active
 * when a debug level has been requested (c.dbg_lv).
 */
void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	/* state of the three active node logs */
	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);


	/* state of the three active data logs */
	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}
246
247void print_cp_state(u32 flag)
248{
249	MSG(0, "Info: checkpoint state = %x : ", flag);
250	if (flag & CP_FSCK_FLAG)
251		MSG(0, "%s", " fsck");
252	if (flag & CP_ERROR_FLAG)
253		MSG(0, "%s", " error");
254	if (flag & CP_COMPACT_SUM_FLAG)
255		MSG(0, "%s", " compacted_summary");
256	if (flag & CP_ORPHAN_PRESENT_FLAG)
257		MSG(0, "%s", " orphan_inodes");
258	if (flag & CP_FASTBOOT_FLAG)
259		MSG(0, "%s", " fastboot");
260	if (flag & CP_UMOUNT_FLAG)
261		MSG(0, "%s", " unmount");
262	else
263		MSG(0, "%s", " sudden-power-off");
264	MSG(0, "\n");
265}
266
267void print_sb_state(struct f2fs_super_block *sb)
268{
269	__le32 f = sb->feature;
270	int i;
271
272	MSG(0, "Info: superblock features = %x : ", f);
273	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
274		MSG(0, "%s", " encrypt");
275	}
276	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
277		MSG(0, "%s", " zoned block device");
278	}
279	MSG(0, "\n");
280	MSG(0, "Info: superblock encrypt level = %d, salt = ",
281					sb->encryption_level);
282	for (i = 0; i < 16; i++)
283		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
284	MSG(0, "\n");
285}
286
/*
 * Verify that the metadata areas recorded in the superblock (CP, SIT,
 * NAT, SSA, MAIN) are exactly adjacent, i.e. each area starts where the
 * previous one ends.  Returns 0 on success, -1 on any mismatch.
 *
 * If the main area ends before the overall segment range does, the
 * superblock's segment_count is shrunk to match and written back to
 * disk at @offset (alignment auto-fix).
 */
static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
							u64 offset)
{
	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
	u32 cp_blkaddr = get_sb(cp_blkaddr);
	u32 sit_blkaddr = get_sb(sit_blkaddr);
	u32 nat_blkaddr = get_sb(nat_blkaddr);
	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
	u32 main_blkaddr = get_sb(main_blkaddr);
	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
	u32 segment_count_sit = get_sb(segment_count_sit);
	u32 segment_count_nat = get_sb(segment_count_nat);
	u32 segment_count_ssa = get_sb(segment_count_ssa);
	u32 segment_count_main = get_sb(segment_count_main);
	u32 segment_count = get_sb(segment_count);
	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	/* CP area must start at segment 0 */
	if (segment0_blkaddr != cp_blkaddr) {
		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
				segment0_blkaddr, cp_blkaddr);
		return -1;
	}

	/* SIT must immediately follow CP */
	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return -1;
	}

	/* NAT must immediately follow SIT */
	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return -1;
	}

	/* SSA must immediately follow NAT */
	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return -1;
	}

	/* MAIN must immediately follow SSA */
	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return -1;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		/* MAIN overruns the device's segment range: fatal */
		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return -1;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		/* Trailing slack after MAIN: shrink segment_count in the
		 * superblock and persist the fix.
		 * NOTE(review): the message below still prints the
		 * pre-fix segment_count held in the local variable. */
		int err;

		set_sb(segment_count, (main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		err = dev_write(sb, offset, sizeof(struct f2fs_super_block));
		MSG(0, "Info: Fix alignment: %s, start(%u) end(%u) block(%u)\n",
			err ? "failed": "done",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}
	return 0;
}
369
/*
 * Validate the basic invariants of a candidate superblock read from
 * @offset.  Returns 0 when the superblock looks sane, -1 otherwise.
 */
int sanity_check_raw_super(struct f2fs_super_block *sb, u64 offset)
{
	unsigned int blocksize;

	/* on-disk magic must match */
	if (F2FS_SUPER_MAGIC != get_sb(magic))
		return -1;

	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE)
		return -1;

	/* recorded block size must match the fixed F2FS block size */
	blocksize = 1 << get_sb(log_blocksize);
	if (F2FS_BLKSIZE != blocksize)
		return -1;

	/* check log blocks per segment (f2fs segments are fixed at 512 blocks) */
	if (get_sb(log_blocks_per_seg) != 9)
		return -1;

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE)
		return -1;

	/* sector size * sectors per block must equal the block size */
	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
						F2FS_MAX_LOG_SECTOR_SIZE)
		return -1;

	/* check reserved ino info */
	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
					get_sb(root_ino) != 3)
		return -1;

	/* Check zoned block device feature */
	if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
		MSG(0, "\tMissing zoned block device feature\n");
		return -1;
	}

	if (get_sb(segment_count) > F2FS_MAX_SEGMENT)
		return -1;

	/* finally, verify the metadata areas are contiguous */
	if (sanity_check_area_boundary(sb, offset))
		return -1;
	return 0;
}
416
417int validate_super_block(struct f2fs_sb_info *sbi, int block)
418{
419	u64 offset;
420
421	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
422
423	if (block == 0)
424		offset = F2FS_SUPER_OFFSET;
425	else
426		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;
427
428	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
429		return -1;
430
431	if (!sanity_check_raw_super(sbi->raw_super, offset)) {
432		/* get kernel version */
433		if (c.kd >= 0) {
434			dev_read_version(c.version, 0, VERSION_LEN);
435			get_kernel_version(c.version);
436		} else {
437			memset(c.version, 0, VERSION_LEN);
438		}
439
440		/* build sb version */
441		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
442		get_kernel_version(c.sb_version);
443		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
444		get_kernel_version(c.init_version);
445
446		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
447		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
448					c.sb_version, c.version);
449		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
450			int ret;
451
452			memcpy(sbi->raw_super->version,
453						c.version, VERSION_LEN);
454			ret = dev_write(sbi->raw_super, offset,
455					sizeof(struct f2fs_super_block));
456			ASSERT(ret >= 0);
457
458			c.auto_fix = 0;
459			c.fix_on = 1;
460		}
461		print_sb_state(sbi->raw_super);
462		return 0;
463	}
464
465	free(sbi->raw_super);
466	sbi->raw_super = NULL;
467	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);
468
469	return -EINVAL;
470}
471
/*
 * Populate the in-memory sb_info from the validated raw superblock and
 * discover any additional devices of a multi-device filesystem.
 */
int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	u64 total_sectors;
	int i;

	/* cache the geometry fields used throughout fsck */
	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
	sbi->log_blocksize = get_sb(log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = get_sb(segs_per_sec);
	sbi->secs_per_zone = get_sb(secs_per_zone);
	sbi->total_sections = get_sb(section_count);
	/* NAT segments come in pairs, so half of them hold live entries */
	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = get_sb(root_ino);
	sbi->node_ino_num = get_sb(node_ino);
	sbi->meta_ino_num = get_sb(meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;

	/* walk the device table; an empty path terminates the list */
	for (i = 0; i < MAX_DEVICES; i++) {
		if (!sb->devs[i].path[0])
			break;

		if (i) {
			/* secondary devices are opened from the sb path */
			c.devices[i].path = strdup((char *)sb->devs[i].path);
			if (get_device_info(i))
				ASSERT(0);
		} else {
			/* primary device must match what the user gave us */
			ASSERT(!strcmp((char *)sb->devs[i].path,
						(char *)c.devices[i].path));
		}

		/* derive each device's contiguous block address range */
		c.devices[i].total_segments =
			le32_to_cpu(sb->devs[i].total_segments);
		if (i)
			c.devices[i].start_blkaddr =
				c.devices[i - 1].end_blkaddr + 1;
		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
			c.devices[i].total_segments *
			c.blks_per_seg - 1;
		if (i == 0)
			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);

		c.ndevs = i + 1;
		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
				i, c.devices[i].path,
				c.devices[i].start_blkaddr,
				c.devices[i].end_blkaddr);
	}

	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
				total_sectors, total_sectors >>
						(20 - get_sb(log_sectorsize)));
	return 0;
}
530
531void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
532				unsigned long long *version)
533{
534	void *cp_page_1, *cp_page_2;
535	struct f2fs_checkpoint *cp;
536	unsigned long blk_size = sbi->blocksize;
537	unsigned long long cur_version = 0, pre_version = 0;
538	unsigned int crc = 0;
539	size_t crc_offset;
540
541	/* Read the 1st cp block in this CP pack */
542	cp_page_1 = malloc(PAGE_SIZE);
543	if (dev_read_block(cp_page_1, cp_addr) < 0)
544		goto invalid_cp1;
545
546	cp = (struct f2fs_checkpoint *)cp_page_1;
547	crc_offset = get_cp(checksum_offset);
548	if (crc_offset >= blk_size)
549		goto invalid_cp1;
550
551	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
552	if (f2fs_crc_valid(crc, cp, crc_offset))
553		goto invalid_cp1;
554
555	pre_version = get_cp(checkpoint_ver);
556
557	/* Read the 2nd cp block in this CP pack */
558	cp_page_2 = malloc(PAGE_SIZE);
559	cp_addr += get_cp(cp_pack_total_block_count) - 1;
560
561	if (dev_read_block(cp_page_2, cp_addr) < 0)
562		goto invalid_cp2;
563
564	cp = (struct f2fs_checkpoint *)cp_page_2;
565	crc_offset = get_cp(checksum_offset);
566	if (crc_offset >= blk_size)
567		goto invalid_cp2;
568
569	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
570	if (f2fs_crc_valid(crc, cp, crc_offset))
571		goto invalid_cp2;
572
573	cur_version = get_cp(checkpoint_ver);
574
575	if (cur_version == pre_version) {
576		*version = cur_version;
577		free(cp_page_2);
578		return cp_page_1;
579	}
580
581invalid_cp2:
582	free(cp_page_2);
583invalid_cp1:
584	free(cp_page_1);
585	return NULL;
586}
587
588int get_valid_checkpoint(struct f2fs_sb_info *sbi)
589{
590	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
591	void *cp1, *cp2, *cur_page;
592	unsigned long blk_size = sbi->blocksize;
593	unsigned long long cp1_version = 0, cp2_version = 0, version;
594	unsigned long long cp_start_blk_no;
595	unsigned int cp_payload, cp_blks;
596	int ret;
597
598	cp_payload = get_sb(cp_payload);
599	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
600		return -EINVAL;
601
602	cp_blks = 1 + cp_payload;
603	sbi->ckpt = malloc(cp_blks * blk_size);
604	if (!sbi->ckpt)
605		return -ENOMEM;
606	/*
607	 * Finding out valid cp block involves read both
608	 * sets( cp pack1 and cp pack 2)
609	 */
610	cp_start_blk_no = get_sb(cp_blkaddr);
611	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
612
613	/* The second checkpoint pack should start at the next segment */
614	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
615	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
616
617	if (cp1 && cp2) {
618		if (ver_after(cp2_version, cp1_version)) {
619			cur_page = cp2;
620			sbi->cur_cp = 2;
621			version = cp2_version;
622		} else {
623			cur_page = cp1;
624			sbi->cur_cp = 1;
625			version = cp1_version;
626		}
627	} else if (cp1) {
628		cur_page = cp1;
629		sbi->cur_cp = 1;
630		version = cp1_version;
631	} else if (cp2) {
632		cur_page = cp2;
633		sbi->cur_cp = 2;
634		version = cp2_version;
635	} else
636		goto fail_no_cp;
637
638	MSG(0, "Info: CKPT version = %llx\n", version);
639
640	memcpy(sbi->ckpt, cur_page, blk_size);
641
642	if (cp_blks > 1) {
643		unsigned int i;
644		unsigned long long cp_blk_no;
645
646		cp_blk_no = get_sb(cp_blkaddr);
647		if (cur_page == cp2)
648			cp_blk_no += 1 << get_sb(log_blocks_per_seg);
649
650		/* copy sit bitmap */
651		for (i = 1; i < cp_blks; i++) {
652			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
653			ret = dev_read_block(cur_page, cp_blk_no + i);
654			ASSERT(ret >= 0);
655			memcpy(ckpt + i * blk_size, cur_page, blk_size);
656		}
657	}
658	if (cp1)
659		free(cp1);
660	if (cp2)
661		free(cp2);
662	return 0;
663
664fail_no_cp:
665	free(sbi->ckpt);
666	sbi->ckpt = NULL;
667	return -EINVAL;
668}
669
670int sanity_check_ckpt(struct f2fs_sb_info *sbi)
671{
672	unsigned int total, fsmeta;
673	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
674	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
675
676	total = get_sb(segment_count);
677	fsmeta = get_sb(segment_count_ckpt);
678	fsmeta += get_sb(segment_count_sit);
679	fsmeta += get_sb(segment_count_nat);
680	fsmeta += get_cp(rsvd_segment_count);
681	fsmeta += get_sb(segment_count_ssa);
682
683	if (fsmeta >= total)
684		return 1;
685
686	return 0;
687}
688
/*
 * Compute the on-disk block address of the NAT block containing entry
 * @start.  NAT segments are laid out in pairs (two copies of each
 * block); the version bit in nat_bitmap selects which copy of the pair
 * is currently valid.
 */
static pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	/* logical NAT block index and the segment it falls in */
	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	/* each segment is doubled on disk (<< 1), so skip over pairs and
	 * then index into the first copy of the pair */
	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) -1)));

	/* version bit set: the second copy of the pair is the live one */
	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}
708
709static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
710{
711	struct f2fs_nm_info *nm_i = NM_I(sbi);
712	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
713	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
714	struct f2fs_summary_block *sum = curseg->sum_blk;
715	struct f2fs_journal *journal = &sum->journal;
716	struct f2fs_nat_block nat_block;
717	block_t start_blk;
718	nid_t nid;
719	int i;
720
721	if (!(c.func == SLOAD))
722		return 0;
723
724	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
725	if (!nm_i->nid_bitmap)
726		return -ENOMEM;
727
728	/* arbitrarily set 0 bit */
729	f2fs_set_bit(0, nm_i->nid_bitmap);
730
731	memset((void *)&nat_block, 0, sizeof(struct f2fs_nat_block));
732
733	for (nid = 0; nid < nm_i->max_nid; nid++) {
734		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
735			int ret;
736
737			start_blk = current_nat_addr(sbi, nid);
738			ret = dev_read_block((void *)&nat_block, start_blk);
739			ASSERT(ret >= 0);
740		}
741
742		if (nat_block.entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
743			f2fs_set_bit(nid, nm_i->nid_bitmap);
744	}
745
746	for (i = 0; i < nats_in_cursum(journal); i++) {
747		block_t addr;
748
749		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
750		nid = le32_to_cpu(nid_in_journal(journal, i));
751		if (addr != NULL_ADDR)
752			f2fs_set_bit(nid, nm_i->nid_bitmap);
753	}
754	return 0;
755}
756
757int init_node_manager(struct f2fs_sb_info *sbi)
758{
759	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
760	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
761	struct f2fs_nm_info *nm_i = NM_I(sbi);
762	unsigned char *version_bitmap;
763	unsigned int nat_segs, nat_blocks;
764
765	nm_i->nat_blkaddr = get_sb(nat_blkaddr);
766
767	/* segment_count_nat includes pair segment so divide to 2. */
768	nat_segs = get_sb(segment_count_nat) >> 1;
769	nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
770	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
771	nm_i->fcnt = 0;
772	nm_i->nat_cnt = 0;
773	nm_i->init_scan_nid = get_cp(next_free_nid);
774	nm_i->next_scan_nid = get_cp(next_free_nid);
775
776	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
777
778	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
779	if (!nm_i->nat_bitmap)
780		return -ENOMEM;
781	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
782	if (!version_bitmap)
783		return -EFAULT;
784
785	/* copy version bitmap */
786	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
787	return f2fs_init_nid_bitmap(sbi);
788}
789
790int build_node_manager(struct f2fs_sb_info *sbi)
791{
792	int err;
793	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
794	if (!sbi->nm_info)
795		return -ENOMEM;
796
797	err = init_node_manager(sbi);
798	if (err)
799		return err;
800
801	return 0;
802}
803
804int build_sit_info(struct f2fs_sb_info *sbi)
805{
806	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
807	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
808	struct sit_info *sit_i;
809	unsigned int sit_segs, start;
810	char *src_bitmap, *dst_bitmap;
811	unsigned int bitmap_size;
812
813	sit_i = malloc(sizeof(struct sit_info));
814	if (!sit_i)
815		return -ENOMEM;
816
817	SM_I(sbi)->sit_info = sit_i;
818
819	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
820	if (!sit_i->sentries)
821		return -ENOMEM;
822
823	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
824		sit_i->sentries[start].cur_valid_map
825			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
826		sit_i->sentries[start].ckpt_valid_map
827			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
828		if (!sit_i->sentries[start].cur_valid_map
829				|| !sit_i->sentries[start].ckpt_valid_map)
830			return -ENOMEM;
831	}
832
833	sit_segs = get_sb(segment_count_sit) >> 1;
834	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
835	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
836
837	dst_bitmap = malloc(bitmap_size);
838	memcpy(dst_bitmap, src_bitmap, bitmap_size);
839
840	sit_i->sit_base_addr = get_sb(sit_blkaddr);
841	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
842	sit_i->written_valid_blocks = get_cp(valid_block_count);
843	sit_i->sit_bitmap = dst_bitmap;
844	sit_i->bitmap_size = bitmap_size;
845	sit_i->dirty_sentries = 0;
846	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
847	sit_i->elapsed_time = get_cp(elapsed_time);
848	return 0;
849}
850
851void reset_curseg(struct f2fs_sb_info *sbi, int type)
852{
853	struct curseg_info *curseg = CURSEG_I(sbi, type);
854	struct summary_footer *sum_footer;
855	struct seg_entry *se;
856
857	sum_footer = &(curseg->sum_blk->footer);
858	memset(sum_footer, 0, sizeof(struct summary_footer));
859	if (IS_DATASEG(type))
860		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
861	if (IS_NODESEG(type))
862		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
863	se = get_seg_entry(sbi, curseg->segno);
864	se->type = type;
865}
866
867static void read_compacted_summaries(struct f2fs_sb_info *sbi)
868{
869	struct curseg_info *curseg;
870	unsigned int i, j, offset;
871	block_t start;
872	char *kaddr;
873	int ret;
874
875	start = start_sum_block(sbi);
876
877	kaddr = (char *)malloc(PAGE_SIZE);
878	ret = dev_read_block(kaddr, start++);
879	ASSERT(ret >= 0);
880
881	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
882	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
883
884	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
885	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
886						SUM_JOURNAL_SIZE);
887
888	offset = 2 * SUM_JOURNAL_SIZE;
889	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
890		unsigned short blk_off;
891		struct curseg_info *curseg = CURSEG_I(sbi, i);
892
893		reset_curseg(sbi, i);
894
895		if (curseg->alloc_type == SSR)
896			blk_off = sbi->blocks_per_seg;
897		else
898			blk_off = curseg->next_blkoff;
899
900		ASSERT(blk_off <= ENTRIES_IN_SUM);
901
902		for (j = 0; j < blk_off; j++) {
903			struct f2fs_summary *s;
904			s = (struct f2fs_summary *)(kaddr + offset);
905			curseg->sum_blk->entries[j] = *s;
906			offset += SUMMARY_SIZE;
907			if (offset + SUMMARY_SIZE <=
908					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
909				continue;
910			memset(kaddr, 0, PAGE_SIZE);
911			ret = dev_read_block(kaddr, start++);
912			ASSERT(ret >= 0);
913			offset = 0;
914		}
915	}
916	free(kaddr);
917}
918
919static void restore_node_summary(struct f2fs_sb_info *sbi,
920		unsigned int segno, struct f2fs_summary_block *sum_blk)
921{
922	struct f2fs_node *node_blk;
923	struct f2fs_summary *sum_entry;
924	block_t addr;
925	unsigned int i;
926	int ret;
927
928	node_blk = malloc(F2FS_BLKSIZE);
929	ASSERT(node_blk);
930
931	/* scan the node segment */
932	addr = START_BLOCK(sbi, segno);
933	sum_entry = &sum_blk->entries[0];
934
935	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
936		ret = dev_read_block(node_blk, addr);
937		ASSERT(ret >= 0);
938		sum_entry->nid = node_blk->footer.nid;
939		addr++;
940	}
941	free(node_blk);
942}
943
944static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
945{
946	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
947	struct f2fs_summary_block *sum_blk;
948	struct curseg_info *curseg;
949	unsigned int segno = 0;
950	block_t blk_addr = 0;
951	int ret;
952
953	if (IS_DATASEG(type)) {
954		segno = get_cp(cur_data_segno[type]);
955		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
956			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
957		else
958			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
959	} else {
960		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
961		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
962			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
963							type - CURSEG_HOT_NODE);
964		else
965			blk_addr = GET_SUM_BLKADDR(sbi, segno);
966	}
967
968	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
969	ret = dev_read_block(sum_blk, blk_addr);
970	ASSERT(ret >= 0);
971
972	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
973		restore_node_summary(sbi, segno, sum_blk);
974
975	curseg = CURSEG_I(sbi, type);
976	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
977	reset_curseg(sbi, type);
978	free(sum_blk);
979}
980
/*
 * Update the summary entry for @blk_addr with @sum and persist the
 * containing summary block to the SSA.
 */
void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
					struct f2fs_summary *sum)
{
	struct f2fs_summary_block *sum_blk;
	u32 segno, offset;
	int type, ret;
	struct seg_entry *se;

	segno = GET_SEGNO(sbi, blk_addr);
	offset = OFFSET_IN_SEG(sbi, blk_addr);

	se = get_seg_entry(sbi, segno);

	/* get_sum_block() reports via *type whether we got a curseg's
	 * cached block or a freshly-read copy we must free below */
	sum_blk = get_sum_block(sbi, segno, &type);
	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
							SUM_TYPE_DATA;

	/* write SSA all the time */
	if (type < SEG_TYPE_MAX) {
		u64 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
		ret = dev_write_block(sum_blk, ssa_blk);
		ASSERT(ret >= 0);
	}

	/* these type values mean the block was allocated for us by
	 * get_sum_block(), so we own it and must release it */
	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
					type == SEG_TYPE_MAX)
		free(sum_blk);
}
1010
1011static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
1012{
1013	int type = CURSEG_HOT_DATA;
1014
1015	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1016		read_compacted_summaries(sbi);
1017		type = CURSEG_HOT_NODE;
1018	}
1019
1020	for (; type <= CURSEG_COLD_NODE; type++)
1021		read_normal_summaries(sbi, type);
1022}
1023
1024static void build_curseg(struct f2fs_sb_info *sbi)
1025{
1026	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1027	struct curseg_info *array;
1028	unsigned short blk_off;
1029	unsigned int segno;
1030	int i;
1031
1032	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
1033	ASSERT(array);
1034
1035	SM_I(sbi)->curseg_array = array;
1036
1037	for (i = 0; i < NR_CURSEG_TYPE; i++) {
1038		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
1039		ASSERT(array[i].sum_blk);
1040		if (i <= CURSEG_COLD_DATA) {
1041			blk_off = get_cp(cur_data_blkoff[i]);
1042			segno = get_cp(cur_data_segno[i]);
1043		}
1044		if (i > CURSEG_COLD_DATA) {
1045			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
1046			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
1047		}
1048		array[i].segno = segno;
1049		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
1050		array[i].next_segno = NULL_SEGNO;
1051		array[i].next_blkoff = blk_off;
1052		array[i].alloc_type = cp->alloc_type[i];
1053	}
1054	restore_curseg_summaries(sbi);
1055}
1056
1057static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
1058{
1059	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
1060	ASSERT(segno <= end_segno);
1061}
1062
1063struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
1064						unsigned int segno)
1065{
1066	struct sit_info *sit_i = SIT_I(sbi);
1067	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1068	block_t blk_addr = sit_i->sit_base_addr + offset;
1069	struct f2fs_sit_block *sit_blk;
1070	int ret;
1071
1072	sit_blk = calloc(BLOCK_SZ, 1);
1073	ASSERT(sit_blk);
1074	check_seg_range(sbi, segno);
1075
1076	/* calculate sit block address */
1077	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1078		blk_addr += sit_i->sit_blocks;
1079
1080	ret = dev_read_block(sit_blk, blk_addr);
1081	ASSERT(ret >= 0);
1082
1083	return sit_blk;
1084}
1085
1086void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
1087			unsigned int segno, struct f2fs_sit_block *sit_blk)
1088{
1089	struct sit_info *sit_i = SIT_I(sbi);
1090	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1091	block_t blk_addr = sit_i->sit_base_addr + offset;
1092	int ret;
1093
1094	/* calculate sit block address */
1095	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1096		blk_addr += sit_i->sit_blocks;
1097
1098	ret = dev_write_block(sit_blk, blk_addr);
1099	ASSERT(ret >= 0);
1100}
1101
1102void check_block_count(struct f2fs_sb_info *sbi,
1103		unsigned int segno, struct f2fs_sit_entry *raw_sit)
1104{
1105	struct f2fs_sm_info *sm_info = SM_I(sbi);
1106	unsigned int end_segno = sm_info->segment_count - 1;
1107	int valid_blocks = 0;
1108	unsigned int i;
1109
1110	/* check segment usage */
1111	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
1112		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
1113				segno, GET_SIT_VBLOCKS(raw_sit));
1114
1115	/* check boundary of a given segment number */
1116	if (segno > end_segno)
1117		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
1118
1119	/* check bitmap with valid block count */
1120	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1121		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
1122
1123	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
1124		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
1125				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
1126
1127	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
1128		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
1129				segno, GET_SIT_TYPE(raw_sit));
1130}
1131
1132void seg_info_from_raw_sit(struct seg_entry *se,
1133		struct f2fs_sit_entry *raw_sit)
1134{
1135	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1136	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1137	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1138	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1139	se->type = GET_SIT_TYPE(raw_sit);
1140	se->orig_type = GET_SIT_TYPE(raw_sit);
1141	se->mtime = le64_to_cpu(raw_sit->mtime);
1142}
1143
1144struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
1145		unsigned int segno)
1146{
1147	struct sit_info *sit_i = SIT_I(sbi);
1148	return &sit_i->sentries[segno];
1149}
1150
1151struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
1152				unsigned int segno, int *ret_type)
1153{
1154	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1155	struct f2fs_summary_block *sum_blk;
1156	struct curseg_info *curseg;
1157	int type, ret;
1158	u64 ssa_blk;
1159
1160	*ret_type= SEG_TYPE_MAX;
1161
1162	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
1163	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
1164		if (segno == get_cp(cur_node_segno[type])) {
1165			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
1166			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
1167				ASSERT_MSG("segno [0x%x] indicates a data "
1168						"segment, but should be node",
1169						segno);
1170				*ret_type = -SEG_TYPE_CUR_NODE;
1171			} else {
1172				*ret_type = SEG_TYPE_CUR_NODE;
1173			}
1174			return curseg->sum_blk;
1175		}
1176	}
1177
1178	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
1179		if (segno == get_cp(cur_data_segno[type])) {
1180			curseg = CURSEG_I(sbi, type);
1181			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
1182				ASSERT_MSG("segno [0x%x] indicates a node "
1183						"segment, but should be data",
1184						segno);
1185				*ret_type = -SEG_TYPE_CUR_DATA;
1186			} else {
1187				*ret_type = SEG_TYPE_CUR_DATA;
1188			}
1189			return curseg->sum_blk;
1190		}
1191	}
1192
1193	sum_blk = calloc(BLOCK_SZ, 1);
1194	ASSERT(sum_blk);
1195
1196	ret = dev_read_block(sum_blk, ssa_blk);
1197	ASSERT(ret >= 0);
1198
1199	if (IS_SUM_NODE_SEG(sum_blk->footer))
1200		*ret_type = SEG_TYPE_NODE;
1201	else if (IS_SUM_DATA_SEG(sum_blk->footer))
1202		*ret_type = SEG_TYPE_DATA;
1203
1204	return sum_blk;
1205}
1206
1207int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
1208				struct f2fs_summary *sum_entry)
1209{
1210	struct f2fs_summary_block *sum_blk;
1211	u32 segno, offset;
1212	int type;
1213
1214	segno = GET_SEGNO(sbi, blk_addr);
1215	offset = OFFSET_IN_SEG(sbi, blk_addr);
1216
1217	sum_blk = get_sum_block(sbi, segno, &type);
1218	memcpy(sum_entry, &(sum_blk->entries[offset]),
1219				sizeof(struct f2fs_summary));
1220	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1221					type == SEG_TYPE_MAX)
1222		free(sum_blk);
1223	return type;
1224}
1225
1226static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
1227				struct f2fs_nat_entry *raw_nat)
1228{
1229	struct f2fs_nat_block *nat_block;
1230	pgoff_t block_addr;
1231	int entry_off;
1232	int ret;
1233
1234	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
1235		return;
1236
1237	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1238	ASSERT(nat_block);
1239
1240	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1241	block_addr = current_nat_addr(sbi, nid);
1242
1243	ret = dev_read_block(nat_block, block_addr);
1244	ASSERT(ret >= 0);
1245
1246	memcpy(raw_nat, &nat_block->entries[entry_off],
1247					sizeof(struct f2fs_nat_entry));
1248	free(nat_block);
1249}
1250
/*
 * Redirect the data pointer at @ofs_in_node in node @nid to @newaddr
 * and write the node block back to disk.  If the old address fell
 * inside the owning inode's extent cache, the extent is invalidated
 * (len = 0) and the inode block rewritten as well.
 */
void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
				u16 ofs_in_node, block_t newaddr)
{
	struct f2fs_node *node_blk = NULL;
	struct node_info ni;
	block_t oldaddr, startaddr, endaddr;
	int ret;

	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
	ASSERT(node_blk);

	get_node_info(sbi, nid, &ni);

	/* read node_block */
	ret = dev_read_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	/* check its block address */
	if (node_blk->footer.nid == node_blk->footer.ino) {
		/* the node IS the inode: data pointers live in i_addr[] */
		oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs_in_node]);
		node_blk->i.i_addr[ofs_in_node] = cpu_to_le32(newaddr);
	} else {
		/* direct node block: data pointers live in dn.addr[] */
		oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
		node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
	}

	ret = dev_write_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	/* check extent cache entry */
	if (node_blk->footer.nid != node_blk->footer.ino) {
		/* the extent lives in the owning inode: load that block */
		get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);

		/* read inode block */
		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);
	}

	startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
	endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
	if (oldaddr >= startaddr && oldaddr < endaddr) {
		/* old address was covered by the extent: drop the extent */
		node_blk->i.i_ext.len = 0;

		/* update inode block */
		ret = dev_write_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);
	}
	free(node_blk);
}
1300
1301void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
1302					nid_t nid, block_t newaddr)
1303{
1304	struct f2fs_nat_block *nat_block;
1305	pgoff_t block_addr;
1306	int entry_off;
1307	int ret;
1308
1309	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1310	ASSERT(nat_block);
1311
1312	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1313	block_addr = current_nat_addr(sbi, nid);
1314
1315	ret = dev_read_block(nat_block, block_addr);
1316	ASSERT(ret >= 0);
1317
1318	if (ino)
1319		nat_block->entries[entry_off].ino = cpu_to_le32(ino);
1320	nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
1321
1322	ret = dev_write_block(nat_block, block_addr);
1323	ASSERT(ret >= 0);
1324	free(nat_block);
1325}
1326
1327void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
1328{
1329	struct f2fs_nat_entry raw_nat;
1330	get_nat_entry(sbi, nid, &raw_nat);
1331	ni->nid = nid;
1332	node_info_from_raw_nat(ni, &raw_nat);
1333}
1334
1335void build_sit_entries(struct f2fs_sb_info *sbi)
1336{
1337	struct sit_info *sit_i = SIT_I(sbi);
1338	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1339	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1340	struct seg_entry *se;
1341	struct f2fs_sit_entry sit;
1342	unsigned int i, segno;
1343
1344	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1345		se = &sit_i->sentries[segno];
1346		struct f2fs_sit_block *sit_blk;
1347
1348		sit_blk = get_current_sit_page(sbi, segno);
1349		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1350		free(sit_blk);
1351
1352		check_block_count(sbi, segno, &sit);
1353		seg_info_from_raw_sit(se, &sit);
1354	}
1355
1356	for (i = 0; i < sits_in_cursum(journal); i++) {
1357		segno = le32_to_cpu(segno_in_journal(journal, i));
1358		se = &sit_i->sentries[segno];
1359		sit = sit_in_journal(journal, i);
1360
1361		check_block_count(sbi, segno, &sit);
1362		seg_info_from_raw_sit(se, &sit);
1363	}
1364
1365}
1366
1367int build_segment_manager(struct f2fs_sb_info *sbi)
1368{
1369	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1370	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1371	struct f2fs_sm_info *sm_info;
1372
1373	sm_info = malloc(sizeof(struct f2fs_sm_info));
1374	if (!sm_info)
1375		return -ENOMEM;
1376
1377	/* init sm info */
1378	sbi->sm_info = sm_info;
1379	sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
1380	sm_info->main_blkaddr = get_sb(main_blkaddr);
1381	sm_info->segment_count = get_sb(segment_count);
1382	sm_info->reserved_segments = get_cp(rsvd_segment_count);
1383	sm_info->ovp_segments = get_cp(overprov_segment_count);
1384	sm_info->main_segments = get_sb(segment_count_main);
1385	sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
1386
1387	build_sit_info(sbi);
1388
1389	build_curseg(sbi);
1390
1391	build_sit_entries(sbi);
1392
1393	return 0;
1394}
1395
1396void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
1397{
1398	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1399	struct f2fs_sm_info *sm_i = SM_I(sbi);
1400	unsigned int segno = 0;
1401	char *ptr = NULL;
1402	u32 sum_vblocks = 0;
1403	u32 free_segs = 0;
1404	struct seg_entry *se;
1405
1406	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
1407	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
1408	ASSERT(fsck->sit_area_bitmap);
1409	ptr = fsck->sit_area_bitmap;
1410
1411	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
1412
1413	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1414		se = get_seg_entry(sbi, segno);
1415
1416		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1417		ptr += SIT_VBLOCK_MAP_SIZE;
1418
1419		if (se->valid_blocks == 0x0) {
1420			if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
1421				le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
1422				le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
1423				le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
1424				le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
1425				le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
1426				continue;
1427			} else {
1428				free_segs++;
1429			}
1430		} else {
1431			sum_vblocks += se->valid_blocks;
1432		}
1433	}
1434	fsck->chk.sit_valid_blocks = sum_vblocks;
1435	fsck->chk.sit_free_segs = free_segs;
1436
1437	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
1438			sum_vblocks, sum_vblocks,
1439			free_segs, free_segs);
1440}
1441
1442void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
1443{
1444	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1445	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1446	struct sit_info *sit_i = SIT_I(sbi);
1447	unsigned int segno = 0;
1448	struct f2fs_summary_block *sum = curseg->sum_blk;
1449	char *ptr = NULL;
1450
1451	/* remove sit journal */
1452	sum->journal.n_sits = 0;
1453
1454	ptr = fsck->main_area_bitmap;
1455
1456	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1457		struct f2fs_sit_block *sit_blk;
1458		struct f2fs_sit_entry *sit;
1459		struct seg_entry *se;
1460		u16 valid_blocks = 0;
1461		u16 type;
1462		int i;
1463
1464		sit_blk = get_current_sit_page(sbi, segno);
1465		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1466		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
1467
1468		/* update valid block count */
1469		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1470			valid_blocks += get_bits_in_byte(sit->valid_map[i]);
1471
1472		se = get_seg_entry(sbi, segno);
1473		memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
1474		se->valid_blocks = valid_blocks;
1475		type = se->type;
1476		if (type >= NO_CHECK_TYPE) {
1477			ASSERT_MSG("Invalide type and valid blocks=%x,%x",
1478					segno, valid_blocks);
1479			type = 0;
1480		}
1481		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
1482								valid_blocks);
1483		rewrite_current_sit_page(sbi, segno, sit_blk);
1484		free(sit_blk);
1485
1486		ptr += SIT_VBLOCK_MAP_SIZE;
1487	}
1488}
1489
1490static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
1491{
1492	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1493	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1494	struct sit_info *sit_i = SIT_I(sbi);
1495	unsigned int segno;
1496	int i;
1497
1498	for (i = 0; i < sits_in_cursum(journal); i++) {
1499		struct f2fs_sit_block *sit_blk;
1500		struct f2fs_sit_entry *sit;
1501		struct seg_entry *se;
1502
1503		segno = segno_in_journal(journal, i);
1504		se = get_seg_entry(sbi, segno);
1505
1506		sit_blk = get_current_sit_page(sbi, segno);
1507		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1508
1509		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1510		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1511							se->valid_blocks);
1512		sit->mtime = cpu_to_le64(se->mtime);
1513
1514		rewrite_current_sit_page(sbi, segno, sit_blk);
1515		free(sit_blk);
1516	}
1517
1518	journal->n_sits = 0;
1519	return i;
1520}
1521
1522static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
1523{
1524	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1525	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1526	struct f2fs_nat_block *nat_block;
1527	pgoff_t block_addr;
1528	int entry_off;
1529	nid_t nid;
1530	int ret;
1531	int i = 0;
1532
1533	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1534	ASSERT(nat_block);
1535next:
1536	if (i >= nats_in_cursum(journal)) {
1537		free(nat_block);
1538		journal->n_nats = 0;
1539		return i;
1540	}
1541
1542	nid = le32_to_cpu(nid_in_journal(journal, i));
1543
1544	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1545	block_addr = current_nat_addr(sbi, nid);
1546
1547	ret = dev_read_block(nat_block, block_addr);
1548	ASSERT(ret >= 0);
1549
1550	memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
1551					sizeof(struct f2fs_nat_entry));
1552
1553	ret = dev_write_block(nat_block, block_addr);
1554	ASSERT(ret >= 0);
1555	i++;
1556	goto next;
1557}
1558
/*
 * Flush both NAT and SIT journals to their metadata areas; when any
 * entry was flushed, persist the new state with a checkpoint.
 */
void flush_journal_entries(struct f2fs_sb_info *sbi)
{
	int flushed = flush_nat_journal_entries(sbi);

	flushed += flush_sit_journal_entries(sbi);

	if (flushed)
		write_checkpoint(sbi);
}
1567
1568void flush_sit_entries(struct f2fs_sb_info *sbi)
1569{
1570	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1571	struct sit_info *sit_i = SIT_I(sbi);
1572	unsigned int segno = 0;
1573	u32 free_segs = 0;
1574
1575	/* update free segments */
1576	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
1577		struct f2fs_sit_block *sit_blk;
1578		struct f2fs_sit_entry *sit;
1579		struct seg_entry *se;
1580
1581		se = get_seg_entry(sbi, segno);
1582
1583		if (!se->dirty)
1584			continue;
1585
1586		sit_blk = get_current_sit_page(sbi, segno);
1587		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
1588		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1589		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
1590							se->valid_blocks);
1591		rewrite_current_sit_page(sbi, segno, sit_blk);
1592		free(sit_blk);
1593
1594		if (se->valid_blocks == 0x0 &&
1595				!IS_CUR_SEGNO(sbi, segno, NO_CHECK_TYPE))
1596			free_segs++;
1597	}
1598
1599	set_cp(free_segment_count, free_segs);
1600}
1601
/*
 * Scan from *to, one block at a time (@left picks the direction), for a
 * free block usable by log @type within the main area.  Returns 0 with
 * *to set to the chosen block address, or -1 if the scan left the main
 * area.  Full segments and segments held by a current log are skipped
 * a whole segment at a time.
 * NOTE(review): when free segments are scarce (<= reserved + 1), empty
 * segments are skipped so they stay available — presumably reserved
 * for checkpointing/GC; confirm against callers.
 */
int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left, int type)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct seg_entry *se;
	u32 segno;
	u64 offset;
	int not_enough = 0;
	u64 end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

	if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
		not_enough = 1;

	while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
		segno = GET_SEGNO(sbi, *to);
		offset = OFFSET_IN_SEG(sbi, *to);

		se = get_seg_entry(sbi, segno);

		/* full segment or an active log: jump over the segment */
		if (se->valid_blocks == sbi->blocks_per_seg ||
				IS_CUR_SEGNO(sbi, segno, type)) {
			*to = left ? START_BLOCK(sbi, segno) - 1:
						START_BLOCK(sbi, segno + 1);
			continue;
		}

		/* preserve empty segments while free space is scarce */
		if (se->valid_blocks == 0 && not_enough) {
			*to = left ? START_BLOCK(sbi, segno) - 1:
						START_BLOCK(sbi, segno + 1);
			continue;
		}

		/*
		 * At a section boundary: if the whole section is empty,
		 * the current position is usable as-is.
		 */
		if (se->valid_blocks == 0 && !(segno % sbi->segs_per_sec)) {
			struct seg_entry *se2;
			unsigned int i;

			for (i = 1; i < sbi->segs_per_sec; i++) {
				se2 = get_seg_entry(sbi, segno + i);
				if (se2->valid_blocks)
					break;
			}
			if (i == sbi->segs_per_sec)
				return 0;
		}

		/* matching segment type and an unused block slot: take it */
		if (se->type == type &&
			!f2fs_test_bit(offset, (const char *)se->cur_valid_map))
			return 0;

		*to = left ? *to - 1: *to + 1;
	}
	return -1;
}
1655
/*
 * Relocate every current segment to a fresh position found after block
 * address @from, switching the logs to SSR allocation.  Each curseg's
 * cached summary is first flushed to its original SSA block, then the
 * summary for the new segment is loaded in its place.
 */
void move_curseg_info(struct f2fs_sb_info *sbi, u64 from)
{
	int i, ret;

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct f2fs_summary_block buf;
		u32 old_segno;
		u64 ssa_blk, to;

		/* update original SSA too */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_write_block(curseg->sum_blk, ssa_blk);
		ASSERT(ret >= 0);

		/* pick the next usable block at/after @from for this log */
		to = from;
		ret = find_next_free_block(sbi, &to, 0, i);
		ASSERT(ret == 0);

		old_segno = curseg->segno;
		curseg->segno = GET_SEGNO(sbi, to);
		curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
		curseg->alloc_type = SSR;

		/* update new segno */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_read_block(&buf, ssa_blk);
		ASSERT(ret >= 0);

		memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);

		/* update se->types */
		reset_curseg(sbi, i);

		DBG(1, "Move curseg[%d] %x -> %x after %"PRIx64"\n",
				i, old_segno, curseg->segno, from);
	}
}
1695
1696void zero_journal_entries(struct f2fs_sb_info *sbi)
1697{
1698	int i;
1699
1700	for (i = 0; i < NO_CHECK_TYPE; i++)
1701		CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
1702}
1703
1704void write_curseg_info(struct f2fs_sb_info *sbi)
1705{
1706	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1707	int i;
1708
1709	for (i = 0; i < NO_CHECK_TYPE; i++) {
1710		cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
1711		if (i < CURSEG_HOT_NODE) {
1712			set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
1713			set_cp(cur_data_blkoff[i],
1714					CURSEG_I(sbi, i)->next_blkoff);
1715		} else {
1716			int n = i - CURSEG_HOT_NODE;
1717
1718			set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
1719			set_cp(cur_node_blkoff[n],
1720					CURSEG_I(sbi, i)->next_blkoff);
1721		}
1722	}
1723}
1724
1725int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
1726					struct f2fs_nat_entry *raw_nat)
1727{
1728	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1729	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1730	int i = 0;
1731
1732	for (i = 0; i < nats_in_cursum(journal); i++) {
1733		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1734			memcpy(raw_nat, &nat_in_journal(journal, i),
1735						sizeof(struct f2fs_nat_entry));
1736			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
1737			return i;
1738		}
1739	}
1740	return -1;
1741}
1742
1743void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
1744{
1745	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1746	struct f2fs_journal *journal = &curseg->sum_blk->journal;
1747	struct f2fs_nat_block *nat_block;
1748	pgoff_t block_addr;
1749	int entry_off;
1750	int ret;
1751	int i = 0;
1752
1753	/* check in journal */
1754	for (i = 0; i < nats_in_cursum(journal); i++) {
1755		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
1756			memset(&nat_in_journal(journal, i), 0,
1757					sizeof(struct f2fs_nat_entry));
1758			FIX_MSG("Remove nid [0x%x] in nat journal\n", nid);
1759			return;
1760		}
1761	}
1762	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
1763	ASSERT(nat_block);
1764
1765	entry_off = nid % NAT_ENTRY_PER_BLOCK;
1766	block_addr = current_nat_addr(sbi, nid);
1767
1768	ret = dev_read_block(nat_block, block_addr);
1769	ASSERT(ret >= 0);
1770
1771	memset(&nat_block->entries[entry_off], 0,
1772					sizeof(struct f2fs_nat_entry));
1773
1774	ret = dev_write_block(nat_block, block_addr);
1775	ASSERT(ret >= 0);
1776	free(nat_block);
1777}
1778
/*
 * Write the active checkpoint pack back to disk: the checkpoint block,
 * (skipped-over) payload and orphan blocks, one summary block per
 * current segment, and the trailing checkpoint block copy.  Free/valid
 * counts and the CRC are refreshed first.
 */
void write_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	block_t orphan_blks = 0;
	unsigned long long cp_blk_no;
	u32 flags = CP_UMOUNT_FLAG;
	int i, ret;
	u_int32_t crc = 0;

	/* keep the orphan area if the checkpoint carries one */
	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
		orphan_blks = __start_sum_addr(sbi) - 1;
		flags |= CP_ORPHAN_PRESENT_FLAG;
	}

	set_cp(ckpt_flags, flags);

	set_cp(free_segment_count, get_free_segments(sbi));
	set_cp(valid_block_count, sbi->total_valid_block_count);
	/* pack size: cp blocks + curseg summaries + payload + orphans */
	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));

	/* checksum covers everything before CHECKSUM_OFFSET */
	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) = cpu_to_le32(crc);

	/* the second cp pack starts one segment after the first */
	cp_blk_no = get_sb(cp_blkaddr);
	if (sbi->cur_cp == 2)
		cp_blk_no += 1 << get_sb(log_blocks_per_seg);

	/* write the first cp */
	ret = dev_write_block(cp, cp_blk_no++);
	ASSERT(ret >= 0);

	/* skip payload */
	cp_blk_no += get_sb(cp_payload);
	/* skip orphan blocks */
	cp_blk_no += orphan_blks;

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		u64 ssa_blk;

		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
		ASSERT(ret >= 0);

		/* update original SSA too */
		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
		ret = dev_write_block(curseg->sum_blk, ssa_blk);
		ASSERT(ret >= 0);
	}

	/* write the last cp */
	ret = dev_write_block(cp, cp_blk_no++);
	ASSERT(ret >= 0);
}
1834
/*
 * Build fsck's NAT bitmap and entry cache by walking the whole NAT
 * area on disk, then overlaying the newer entries from the NAT
 * journal.  A bit is set per nid whose entry has a non-zero block
 * address, and valid entry/inode counters are maintained alongside.
 * Fixes in place: node/meta inode entries whose block_addr is not the
 * reserved 0x1.
 */
void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = &curseg->sum_blk->journal;
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	struct node_info ni;
	u32 nid, nr_nat_blks;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;
	int ret;
	unsigned int i;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
	ASSERT(nat_block);

	/* Alloc & build nat entry bitmap */
	/* half the NAT segments belong to each of the two NAT copies */
	nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
					sbi->log_blocks_per_seg;

	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
	ASSERT(fsck->nat_area_bitmap);

	fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
					fsck->nr_nat_entries);
	ASSERT(fsck->entries);

	for (block_off = 0; block_off < nr_nat_blks; block_off++) {

		/* the two NAT copies interleave per segment */
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* the NAT bitmap selects which copy is live */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		nid = block_off * NAT_ENTRY_PER_BLOCK;
		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
			ni.nid = nid + i;

			if ((nid + i) == F2FS_NODE_INO(sbi) ||
					(nid + i) == F2FS_META_INO(sbi)) {
				/* block_addr of node/meta inode should be 0x1 */
				if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
					FIX_MSG("ino: 0x%x node/meta inode, block_addr= 0x%x -> 0x1",
							nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
					nat_block->entries[i].block_addr = cpu_to_le32(0x1);
					ret = dev_write_block(nat_block, block_addr);
					ASSERT(ret >= 0);
				}
				continue;
			}

			node_info_from_raw_nat(&ni, &nat_block->entries[i]);
			/* unused entry: no bit, no counting */
			if (ni.blk_addr == 0x0)
				continue;
			if (ni.ino == 0x0) {
				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
					" is invalid\n", ni.ino, ni.blk_addr);
			}
			/* a nid owning itself is (probably) an inode */
			if (ni.ino == (nid + i)) {
				fsck->nat_valid_inode_cnt++;
				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
			}
			if (nid + i == 0) {
				/*
				 * nat entry [0] must be null.  If
				 * it is corrupted, set its bit in
				 * nat_area_bitmap, fsck_verify will
				 * nullify it
				 */
				ASSERT_MSG("Invalid nat entry[0]: "
					"blk_addr[0x%x]\n", ni.blk_addr);
				c.fix_on = 1;
				fsck->chk.valid_nat_entry_cnt--;
			}

			DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
				nid + i, ni.blk_addr, ni.ino);
			f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
			fsck->chk.valid_nat_entry_cnt++;

			fsck->entries[nid + i] = nat_block->entries[i];
		}
	}

	/* Traverse nat journal, update the corresponding entries */
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct f2fs_nat_entry raw_nat;
		nid = le32_to_cpu(nid_in_journal(journal, i));
		ni.nid = nid;

		DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);

		/* Clear the original bit and count */
		if (fsck->entries[nid].block_addr != 0x0) {
			fsck->chk.valid_nat_entry_cnt--;
			f2fs_clear_bit(nid, fsck->nat_area_bitmap);
			if (fsck->entries[nid].ino == nid)
				fsck->nat_valid_inode_cnt--;
		}

		/* Use nat entries in journal */
		memcpy(&raw_nat, &nat_in_journal(journal, i),
					sizeof(struct f2fs_nat_entry));
		node_info_from_raw_nat(&ni, &raw_nat);
		if (ni.blk_addr != 0x0) {
			if (ni.ino == 0x0)
				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
					" is invalid\n", ni.ino, ni.blk_addr);
			if (ni.ino == nid) {
				fsck->nat_valid_inode_cnt++;
				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
			}
			f2fs_set_bit(nid, fsck->nat_area_bitmap);
			fsck->chk.valid_nat_entry_cnt++;
			DBG(3, "nid[0x%x] in nat cache\n", nid);
		}
		fsck->entries[nid] = raw_nat;
	}
	free(nat_block);

	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
			fsck->chk.valid_nat_entry_cnt,
			fsck->chk.valid_nat_entry_cnt);
}
1970
1971static int check_sector_size(struct f2fs_super_block *sb)
1972{
1973	int index;
1974	u_int32_t log_sectorsize, log_sectors_per_block;
1975	u_int8_t *zero_buff;
1976
1977	log_sectorsize = log_base_2(c.sector_size);
1978	log_sectors_per_block = log_base_2(c.sectors_per_blk);
1979
1980	if (log_sectorsize == get_sb(log_sectorsize) &&
1981			log_sectors_per_block == get_sb(log_sectors_per_block))
1982		return 0;
1983
1984	zero_buff = calloc(F2FS_BLKSIZE, 1);
1985	ASSERT(zero_buff);
1986
1987	set_sb(log_sectorsize, log_sectorsize);
1988	set_sb(log_sectors_per_block, log_sectors_per_block);
1989
1990	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
1991	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
1992	for (index = 0; index < 2; index++) {
1993		if (dev_write(zero_buff, index * F2FS_BLKSIZE, F2FS_BLKSIZE)) {
1994			MSG(1, "\tError: Failed while writing supe_blk "
1995				"on disk!!! index : %d\n", index);
1996			free(zero_buff);
1997			return -1;
1998		}
1999	}
2000
2001	free(zero_buff);
2002	return 0;
2003}
2004
/*
 * Bring up the in-memory view of an F2FS image: validate a superblock
 * (either copy), reconcile sector geometry, load the newest valid
 * checkpoint, then build the segment and node managers.  Returns 0 on
 * success, 1 when auto-fix/preen decides no fsck run is needed, and
 * -1 on any failure.
 */
int f2fs_do_mount(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = NULL;
	int ret;

	sbi->active_logs = NR_CURSEG_TYPE;
	/* try the primary superblock first, then the backup copy */
	ret = validate_super_block(sbi, 0);
	if (ret) {
		ret = validate_super_block(sbi, 1);
		if (ret)
			return -1;
	}

	ret = check_sector_size(sbi->raw_super);
	if (ret)
		return -1;

	print_raw_sb_info(F2FS_RAW_SUPER(sbi));

	init_sb_info(sbi);

	ret = get_valid_checkpoint(sbi);
	if (ret) {
		ERR_MSG("Can't find valid checkpoint\n");
		return -1;
	}

	if (sanity_check_ckpt(sbi)) {
		ERR_MSG("Checkpoint is polluted\n");
		return -1;
	}
	cp = F2FS_CKPT(sbi);

	print_ckpt_info(sbi);

	/* in auto-fix/preen mode, only run repairs if CP demands it */
	if (c.auto_fix || c.preen_mode) {
		u32 flag = get_cp(ckpt_flags);

		if (flag & CP_FSCK_FLAG)
			c.fix_on = 1;
		else if (!c.preen_mode)
			return 1;
	}

	c.bug_on = 0;

	/* seed runtime counters from the checkpoint */
	sbi->total_valid_node_count = get_cp(valid_node_count);
	sbi->total_valid_inode_count = get_cp(valid_inode_count);
	sbi->user_block_count = get_cp(user_block_count);
	sbi->total_valid_block_count = get_cp(valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	if (build_segment_manager(sbi)) {
		ERR_MSG("build_segment_manager failed\n");
		return -1;
	}

	if (build_node_manager(sbi)) {
		ERR_MSG("build_node_manager failed\n");
		return -1;
	}

	return 0;
}
2070
2071void f2fs_do_umount(struct f2fs_sb_info *sbi)
2072{
2073	struct sit_info *sit_i = SIT_I(sbi);
2074	struct f2fs_sm_info *sm_i = SM_I(sbi);
2075	struct f2fs_nm_info *nm_i = NM_I(sbi);
2076	unsigned int i;
2077
2078	/* free nm_info */
2079	if (c.func == SLOAD)
2080		free(nm_i->nid_bitmap);
2081	free(nm_i->nat_bitmap);
2082	free(sbi->nm_info);
2083
2084	/* free sit_info */
2085	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
2086		free(sit_i->sentries[i].cur_valid_map);
2087		free(sit_i->sentries[i].ckpt_valid_map);
2088	}
2089	free(sit_i->sit_bitmap);
2090	free(sm_i->sit_info);
2091
2092	/* free sm_info */
2093	for (i = 0; i < NR_CURSEG_TYPE; i++)
2094		free(sm_i->curseg_array[i].sum_blk);
2095
2096	free(sm_i->curseg_array);
2097	free(sbi->sm_info);
2098
2099	free(sbi->ckpt);
2100	free(sbi->raw_super);
2101}
2102