/*
 * btrfs.c -- read-only btrfs support for syslinux
 * Some data structures are derived from btrfs-tools-0.19 ctree.h
 * Copyright 2009-2014 Intel Corporation; authors: Alek Du, H. Peter Anvin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 * Boston MA 02111-1307, USA; either version 2 of the License, or
 * (at your option) any later version; incorporated herein by reference.
 *
 */

#include <dprintf.h>
#include <stdio.h>
#include <string.h>
#include <cache.h>
#include <core.h>
#include <disk.h>
#include <fs.h>
#include <dirent.h>
#include <minmax.h>
#include "btrfs.h"

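/* scratch buffer large enough to hold a single tree node or leaf */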
union tree_buf {
	struct btrfs_header header;
	struct btrfs_node node;
	struct btrfs_leaf leaf;
};

/* filesystem instance structure */
struct btrfs_info {
	u64 fs_tree;
	struct btrfs_super_block sb;
	struct btrfs_chunk_map chunk_map;
	union tree_buf *tree_buf;
};

/* compare function used for bin_search */
typedef int (*cmp_func)(const void *ptr1, const void *ptr2);

/*
 * Simple but useful binary search, used for chunk search and btree search.
 * Returns 0 and sets *slot on an exact match; otherwise returns 1 and
 * *slot is the index where the item would be inserted.
 */
static int bin_search(void *ptr, int item_size, void *cmp_item, cmp_func func,
		      int min, int max, int *slot)
{
	int low = min;
	int high = max;
	int mid;
	int ret;
	unsigned long offset;
	void *item;

	while (low < high) {
		mid = (low + high) / 2;
		offset = mid * item_size;

		item = ptr + offset;
		ret = func(item, cmp_item);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

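/* order chunk map items by logical address; comparator for bin_search */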
static int btrfs_comp_chunk_map(struct btrfs_chunk_map_item *m1,
				struct btrfs_chunk_map_item *m2)
{
	if (m1->logical > m2->logical)
		return 1;
	if (m1->logical < m2->logical)
		return -1;
	return 0;
}

/* insert a new chunk mapping item */
static void insert_map(struct fs_info *fs, struct btrfs_chunk_map_item *item)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_chunk_map *chunk_map = &bfs->chunk_map;
	int ret;
	int slot;
	int i;

	if (chunk_map->map == NULL) { /* first item */
		chunk_map->map_length = BTRFS_MAX_CHUNK_ENTRIES;
		chunk_map->map = malloc(chunk_map->map_length
					* sizeof(chunk_map->map[0]));
		chunk_map->map[0] = *item;
		chunk_map->cur_length = 1;
		return;
	}
	ret = bin_search(chunk_map->map, sizeof(*item), item,
			(cmp_func)btrfs_comp_chunk_map, 0,
			chunk_map->cur_length, &slot);
	if (ret == 0) /* already in map */
		return;
	if (chunk_map->cur_length == BTRFS_MAX_CHUNK_ENTRIES) {
		/* should be impossible */
		printf("too many chunk items\n");
		return;
	}
	for (i = chunk_map->cur_length; i > slot; i--)
		chunk_map->map[i] = chunk_map->map[i-1];
	chunk_map->map[slot] = *item;
	chunk_map->cur_length++;
}

/*
 * Using sys_chunk_array or the chunk tree, convert a logical address to
 * a physical one.  The multi-device case is not supported yet.
 */
static u64 logical_physical(struct fs_info *fs, u64 logical)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_chunk_map *chunk_map = &bfs->chunk_map;
	struct btrfs_chunk_map_item item;
	int slot, ret;

	item.logical = logical;
	ret = bin_search(chunk_map->map, sizeof(chunk_map->map[0]), &item,
			(cmp_func)btrfs_comp_chunk_map, 0,
			chunk_map->cur_length, &slot);
	if (ret == 0)
		slot++;
	else if (slot == 0)
		return -1;
	if (logical >=
		chunk_map->map[slot-1].logical + chunk_map->map[slot-1].length)
		return -1;
	return chunk_map->map[slot-1].physical + logical -
			chunk_map->map[slot-1].logical;
}

/* btrfs keeps several superblock mirrors; calculate the location of each */
static inline u64 btrfs_sb_offset(int mirror)
{
	u64 start = 16 * 1024;
	if (mirror)
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;
}

/* find the most recent super block */
static void btrfs_read_super_block(struct fs_info *fs)
{
	int i;
	int ret;
	u8 fsid[BTRFS_FSID_SIZE];
	u64 offset;
	u64 transid = 0;
	struct btrfs_super_block buf;
	struct btrfs_info * const bfs = fs->fs_info;

	bfs->sb.total_bytes = ~0; /* Unknown as of yet */

	/* find most recent super block */
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		offset = btrfs_sb_offset(i);
		if (offset >= bfs->sb.total_bytes)
			break;

		ret = cache_read(fs, (char *)&buf, offset, sizeof(buf));
		if (ret < sizeof(buf))
			break;

		if (buf.bytenr != offset ||
		    strncmp((char *)(&buf.magic), BTRFS_MAGIC,
			    sizeof(buf.magic)))
			continue;

		if (i == 0)
			memcpy(fsid, buf.fsid, sizeof(fsid));
		else if (memcmp(fsid, buf.fsid, sizeof(fsid)))
			continue;

		if (buf.generation > transid) {
			memcpy(&bfs->sb, &buf, sizeof(bfs->sb));
			transid = buf.generation;
		}
	}
}

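/* size of a chunk item on disk, given its number of stripes */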
static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}

static void clear_path(struct btrfs_path *path)
{
	memset(path, 0, sizeof(*path));
}

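/* full key comparison: objectid, then type, then offset */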
static int btrfs_comp_keys(const struct btrfs_disk_key *k1,
			   const struct btrfs_disk_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

/* compare keys but ignore the offset; useful for enumerating all keys of the same kind */
static int btrfs_comp_keys_type(const struct btrfs_disk_key *k1,
				const struct btrfs_disk_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	return 0;
}

/* search the tree directly on disk ... */
static int search_tree(struct fs_info *fs, u64 loffset,
		       struct btrfs_disk_key *key, struct btrfs_path *path)
{
	struct btrfs_info * const bfs = fs->fs_info;
	union tree_buf *tree_buf = bfs->tree_buf;
	int slot, ret;
	u64 offset;

	offset = logical_physical(fs, loffset);
	cache_read(fs, &tree_buf->header, offset, sizeof(tree_buf->header));
	if (tree_buf->header.level) {
		/* inner node */
		cache_read(fs, (char *)&tree_buf->node.ptrs[0],
			   offset + sizeof tree_buf->header,
			   bfs->sb.nodesize - sizeof tree_buf->header);
		path->itemsnr[tree_buf->header.level] = tree_buf->header.nritems;
		path->offsets[tree_buf->header.level] = loffset;
		ret = bin_search(&tree_buf->node.ptrs[0],
				 sizeof(struct btrfs_key_ptr),
				 key, (cmp_func)btrfs_comp_keys,
				 path->slots[tree_buf->header.level],
				 tree_buf->header.nritems, &slot);
		if (ret && slot > path->slots[tree_buf->header.level])
			slot--;
		path->slots[tree_buf->header.level] = slot;
		ret = search_tree(fs, tree_buf->node.ptrs[slot].blockptr,
				  key, path);
	} else {
		/* leaf node */
		cache_read(fs, (char *)&tree_buf->leaf.items[0],
			   offset + sizeof tree_buf->header,
			   bfs->sb.leafsize - sizeof tree_buf->header);
		path->itemsnr[tree_buf->header.level] = tree_buf->header.nritems;
		path->offsets[tree_buf->header.level] = loffset;
		ret = bin_search(&tree_buf->leaf.items[0],
				 sizeof(struct btrfs_item),
				 key, (cmp_func)btrfs_comp_keys,
				 path->slots[0],
				 tree_buf->header.nritems, &slot);
		if (ret && slot > path->slots[tree_buf->header.level])
			slot--;
		path->slots[tree_buf->header.level] = slot;
		path->item = tree_buf->leaf.items[slot];
		cache_read(fs, (char *)&path->data,
			   offset + sizeof tree_buf->header +
			   tree_buf->leaf.items[slot].offset,
			   tree_buf->leaf.items[slot].size);
	}
	return ret;
}

/* return 0 if leaf found */
static int next_leaf(struct fs_info *fs, struct btrfs_disk_key *key, struct btrfs_path *path)
{
	int slot;
	int level = 1;

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->itemsnr[level]) /* no more nodes */
			return 1;
		slot = path->slots[level] + 1;
		if (slot >= path->itemsnr[level]) {
			level++;
			continue;
		}
		path->slots[level] = slot;
		path->slots[level-1] = 0; /* reset low level slots info */
		search_tree(fs, path->offsets[level], key, path);
		break;
	}
	if (level == BTRFS_MAX_LEVEL)
		return 1;
	return 0;
}

/* return 0 if slot found */
static int next_slot(struct fs_info *fs, struct btrfs_disk_key *key,
		     struct btrfs_path *path)
{
	int slot;

	if (!path->itemsnr[0])
		return 1;
	slot = path->slots[0] + 1;
	if (slot >= path->itemsnr[0])
		return 1;
	path->slots[0] = slot;
	search_tree(fs, path->offsets[0], key, path);
	return 0;
}

/*
 * read the sys_chunk_array from the super block
 */
static void btrfs_read_sys_chunk_array(struct fs_info *fs)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_chunk_map_item item;
	struct btrfs_disk_key *key;
	struct btrfs_chunk *chunk;
	int cur;

	/* read chunk array in superblock */
	cur = 0;
	while (cur < bfs->sb.sys_chunk_array_size) {
		key = (struct btrfs_disk_key *)(bfs->sb.sys_chunk_array + cur);
		cur += sizeof(*key);
		chunk = (struct btrfs_chunk *)(bfs->sb.sys_chunk_array + cur);
		cur += btrfs_chunk_item_size(chunk->num_stripes);
		/* insert to mapping table, ignore multi stripes */
		item.logical = key->offset;
		item.length = chunk->length;
		item.devid = chunk->stripe.devid;
		item.physical = chunk->stripe.offset; /* ignore other stripes */
		insert_map(fs, &item);
	}
}

/* read chunk items from chunk_tree and insert them into the chunk map */
static void btrfs_read_chunk_tree(struct fs_info *fs)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_disk_key search_key;
	struct btrfs_chunk *chunk;
	struct btrfs_chunk_map_item item;
	struct btrfs_path path;

	if (!(bfs->sb.flags & BTRFS_SUPER_FLAG_METADUMP)) {
		if (bfs->sb.num_devices > 1)
			printf("warning: only single-device btrfs is supported\n");
		/* read chunks from the chunk_tree */
		search_key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		search_key.type = BTRFS_CHUNK_ITEM_KEY;
		search_key.offset = 0;
		clear_path(&path);
		search_tree(fs, bfs->sb.chunk_root, &search_key, &path);
		do {
			do {
				if (btrfs_comp_keys_type(&search_key,
							&path.item.key))
					break;
				chunk = (struct btrfs_chunk *)(path.data);
				/* insert to mapping table, ignore stripes */
				item.logical = path.item.key.offset;
				item.length = chunk->length;
				item.devid = chunk->stripe.devid;
				item.physical = chunk->stripe.offset;
				insert_map(fs, &item);
			} while (!next_slot(fs, &search_key, &path));
			if (btrfs_comp_keys_type(&search_key, &path.item.key))
				break;
		} while (!next_leaf(fs, &search_key, &path));
	}
}

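/* crc32c-based hash of a file name, used as the offset of DIR_ITEM keys */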
static inline u64 btrfs_name_hash(const char *name, int len)
{
	return btrfs_crc32c((u32)~1, name, len);
}

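/*
 * Load an inode by object id.  For regular files and symlinks the first
 * extent is also looked up, and its location is stashed in the inode's
 * private data for later reads.
 */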
static struct inode *btrfs_iget_by_inr(struct fs_info *fs, u64 inr)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct inode *inode;
	struct btrfs_inode_item inode_item;
	struct btrfs_disk_key search_key;
	struct btrfs_path path;
	int ret;

	/* FIXME: some btrfs inode members are u64, while our logical inode
	   is u32; we may need to change them to u64 later */
	search_key.objectid = inr;
	search_key.type = BTRFS_INODE_ITEM_KEY;
	search_key.offset = 0;
	clear_path(&path);
	ret = search_tree(fs, bfs->fs_tree, &search_key, &path);
	if (ret)
		return NULL;
	inode_item = *(struct btrfs_inode_item *)path.data;
	if (!(inode = alloc_inode(fs, inr, sizeof(struct btrfs_pvt_inode))))
		return NULL;
	inode->ino = inr;
	inode->size = inode_item.size;
	inode->mode = IFTODT(inode_item.mode);

	if (inode->mode == DT_REG || inode->mode == DT_LNK) {
		struct btrfs_file_extent_item extent_item;
		u64 offset;

		/* get the file_extent_item */
		search_key.type = BTRFS_EXTENT_DATA_KEY;
		search_key.offset = 0;
		clear_path(&path);
		ret = search_tree(fs, bfs->fs_tree, &search_key, &path);
		if (ret)
			return NULL; /* impossible */
		extent_item = *(struct btrfs_file_extent_item *)path.data;
		if (extent_item.type == BTRFS_FILE_EXTENT_INLINE) /* inline file */
			offset = path.offsets[0] + sizeof(struct btrfs_header)
				+ path.item.offset
				+ offsetof(struct btrfs_file_extent_item, disk_bytenr);
		else
			offset = extent_item.disk_bytenr;
		PVT(inode)->offset = offset;
	}
	return inode;
}

static struct inode *btrfs_iget_root(struct fs_info *fs)
{
	/* BTRFS_FIRST_CHUNK_TREE_OBJECTID (256) is actually the first objectid in the FS_TREE */
	return btrfs_iget_by_inr(fs, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
}

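/* look up a name in a directory inode by searching its DIR_ITEM keys */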
static struct inode *btrfs_iget(const char *name, struct inode *parent)
{
	struct fs_info * const fs = parent->fs;
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_disk_key search_key;
	struct btrfs_path path;
	struct btrfs_dir_item dir_item;
	int ret;

	search_key.objectid = parent->ino;
	search_key.type = BTRFS_DIR_ITEM_KEY;
	search_key.offset = btrfs_name_hash(name, strlen(name));
	clear_path(&path);
	ret = search_tree(fs, bfs->fs_tree, &search_key, &path);
	if (ret)
		return NULL;
	dir_item = *(struct btrfs_dir_item *)path.data;

	return btrfs_iget_by_inr(fs, dir_item.location.objectid);
}

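/* read a symlink target into buf; PVT(inode)->offset was set up at iget time */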
static int btrfs_readlink(struct inode *inode, char *buf)
{
	cache_read(inode->fs, buf,
		   logical_physical(inode->fs, PVT(inode)->offset),
		   inode->size);
	buf[inode->size] = '\0';
	return inode->size;
}

static int btrfs_readdir(struct file *file, struct dirent *dirent)
{
	struct fs_info * const fs = file->fs;
	struct btrfs_info * const bfs = fs->fs_info;
	struct inode * const inode = file->inode;
	struct btrfs_disk_key search_key;
	struct btrfs_path path;
	struct btrfs_dir_item *dir_item;
	int ret;

	/*
	 * We use file->offset to store the last search key.offset; we search
	 * for keys lower than that offset.  0 means this is the first search,
	 * so we search for -1UL, the biggest possible key.
	 */
	search_key.objectid = inode->ino;
	search_key.type = BTRFS_DIR_ITEM_KEY;
	search_key.offset = file->offset - 1;
	clear_path(&path);
	ret = search_tree(fs, bfs->fs_tree, &search_key, &path);

	if (ret) {
		if (btrfs_comp_keys_type(&search_key, &path.item.key))
			return -1;
	}

	dir_item = (struct btrfs_dir_item *)path.data;
	file->offset = path.item.key.offset;
	dirent->d_ino = dir_item->location.objectid;
	dirent->d_off = file->offset;
	dirent->d_reclen = offsetof(struct dirent, d_name)
		+ dir_item->name_len + 1;
	dirent->d_type = IFTODT(dir_item->type);
	memcpy(dirent->d_name, dir_item + 1, dir_item->name_len);
	dirent->d_name[dir_item->name_len] = '\0';

	return 0;
}

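/*
 * Find the extent covering logical sector lstart and fill in
 * inode->next_extent; inline extents are faked as a single extent whose
 * data lives inside the leaf.
 */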
static int btrfs_next_extent(struct inode *inode, uint32_t lstart)
{
	struct btrfs_disk_key search_key;
	struct btrfs_file_extent_item extent_item;
	struct btrfs_path path;
	int ret;
	u64 offset;
	struct fs_info * const fs = inode->fs;
	struct btrfs_info * const bfs = fs->fs_info;
	u32 sec_shift = SECTOR_SHIFT(fs);
	u32 sec_size = SECTOR_SIZE(fs);

	search_key.objectid = inode->ino;
	search_key.type = BTRFS_EXTENT_DATA_KEY;
	search_key.offset = lstart << sec_shift;
	clear_path(&path);
	ret = search_tree(fs, bfs->fs_tree, &search_key, &path);
	if (ret) { /* impossible */
		printf("btrfs: search extent data error!\n");
		return -1;
	}
	extent_item = *(struct btrfs_file_extent_item *)path.data;

	if (extent_item.encryption) {
		printf("btrfs: found encrypted data, cannot continue!\n");
		return -1;
	}
	if (extent_item.compression) {
		printf("btrfs: found compressed data, cannot continue!\n");
		return -1;
	}

	if (extent_item.type == BTRFS_FILE_EXTENT_INLINE) { /* inline file */
		/* we fake an extent here; PVT(inode)->offset records where the data really is */
		offset = path.offsets[0] + sizeof(struct btrfs_header)
			+ path.item.offset
			+ offsetof(struct btrfs_file_extent_item, disk_bytenr);
		inode->next_extent.len =
			(inode->size + sec_size - 1) >> sec_shift;
	} else {
		offset = extent_item.disk_bytenr + extent_item.offset;
		inode->next_extent.len =
			(extent_item.num_bytes + sec_size - 1) >> sec_shift;
	}
	inode->next_extent.pstart = logical_physical(fs, offset) >> sec_shift;
	PVT(inode)->offset = offset;
	return 0;
}

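/*
 * Read file sectors via the generic path.  Inline extents start at an
 * arbitrary byte offset inside a sector, so the first read of an inline
 * file is compensated by shifting the data down within the buffer.
 */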
static uint32_t btrfs_getfssec(struct file *file, char *buf, int sectors,
					bool *have_more)
{
	u32 ret;
	struct fs_info *fs = file->fs;
	u32 off = PVT(file->inode)->offset % SECTOR_SIZE(fs);
	bool handle_inline = false;

	if (off && !file->offset) { /* first read of an inline file */
		file->inode->size += off;
		handle_inline = true;
	}
	ret = generic_getfssec(file, buf, sectors, have_more);
	if (!ret)
		return ret;
	off = PVT(file->inode)->offset % SECTOR_SIZE(fs);
	if (handle_inline) { /* inline file patch */
		ret -= off;
		memmove(buf, buf + off, ret);	/* regions overlap, so use memmove */
	}
	return ret;
}

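/*
 * Locate the FS_TREE to use: if the installer recorded a subvolume name,
 * resolve it via ROOT_REF items in the tree of tree roots; otherwise use
 * the default FS_TREE.
 */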
static void btrfs_get_fs_tree(struct fs_info *fs)
{
	struct btrfs_info * const bfs = fs->fs_info;
	struct btrfs_disk_key search_key;
	struct btrfs_path path;
	struct btrfs_root_item *tree;
	bool subvol_ok = false;

	/* check whether a subvol name was filled in by the installer */
	if (*SubvolName) {
		search_key.objectid = BTRFS_FS_TREE_OBJECTID;
		search_key.type = BTRFS_ROOT_REF_KEY;
		search_key.offset = 0;
		clear_path(&path);
		if (search_tree(fs, bfs->sb.root, &search_key, &path))
			next_slot(fs, &search_key, &path);
		do {
			do {
				struct btrfs_root_ref *ref;
				int pathlen;

				if (btrfs_comp_keys_type(&search_key,
							&path.item.key))
					break;
				ref = (struct btrfs_root_ref *)path.data;
				pathlen = path.item.size - sizeof(struct btrfs_root_ref);

				if (!strncmp((char *)(ref + 1), SubvolName, pathlen)) {
					subvol_ok = true;
					break;
				}
			} while (!next_slot(fs, &search_key, &path));
			if (subvol_ok)
				break;
			if (btrfs_comp_keys_type(&search_key, &path.item.key))
				break;
		} while (!next_leaf(fs, &search_key, &path));
		if (!subvol_ok) /* should be impossible */
			printf("no subvol found!\n");
	}
	/* find the fs_tree from the tree_root */
	if (subvol_ok)
		search_key.objectid = path.item.key.offset;
	else /* "default" volume */
		search_key.objectid = BTRFS_FS_TREE_OBJECTID;
	search_key.type = BTRFS_ROOT_ITEM_KEY;
	search_key.offset = -1;
	clear_path(&path);
	search_tree(fs, bfs->sb.root, &search_key, &path);
	tree = (struct btrfs_root_item *)path.data;
	bfs->fs_tree = tree->bytenr;
}

/* initialize the fs metadata and return the block size shift bits */
static int btrfs_fs_init(struct fs_info *fs)
{
	struct disk *disk = fs->fs_dev->disk;
	struct btrfs_info *bfs;

	btrfs_init_crc32c();

	bfs = zalloc(sizeof(struct btrfs_info));
	if (!bfs)
		return -1;

	fs->fs_info = bfs;

	fs->sector_shift = disk->sector_shift;
	fs->sector_size  = 1 << fs->sector_shift;
	fs->block_shift  = BTRFS_BLOCK_SHIFT;
	fs->block_size   = 1 << fs->block_shift;

	/* Initialize the block cache */
	cache_init(fs->fs_dev, fs->block_shift);

	btrfs_read_super_block(fs);
	if (bfs->sb.magic != BTRFS_MAGIC_N)
		return -1;
	bfs->tree_buf = malloc(max(bfs->sb.nodesize, bfs->sb.leafsize));
	if (!bfs->tree_buf)
		return -1;
	btrfs_read_sys_chunk_array(fs);
	btrfs_read_chunk_tree(fs);
	btrfs_get_fs_tree(fs);

	return fs->block_shift;
}

const struct fs_ops btrfs_fs_ops = {
    .fs_name       = "btrfs",
    .fs_flags      = 0,
    .fs_init       = btrfs_fs_init,
    .iget_root     = btrfs_iget_root,
    .iget          = btrfs_iget,
    .readlink      = btrfs_readlink,
    .getfssec      = btrfs_getfssec,
    .close_file    = generic_close_file,
    .mangle_name   = generic_mangle_name,
    .next_extent   = btrfs_next_extent,
    .readdir       = btrfs_readdir,
    .chdir_start   = generic_chdir_start,
    .open_config   = generic_open_config,
    .fs_uuid       = NULL,
};