inode.c revision f1a0e7f69b4afccca1ea323ca2e46bb1d63181da
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * inode.c
22 */
23
24#include <linux/squashfs_fs.h>
25#include <linux/module.h>
26#include <linux/zlib.h>
27#include <linux/fs.h>
28#include <linux/squashfs_fs_sb.h>
29#include <linux/squashfs_fs_i.h>
30#include <linux/buffer_head.h>
31#include <linux/vfs.h>
32#include <linux/vmalloc.h>
33#include <linux/smp_lock.h>
34
35#include "squashfs.h"
36
37static int squashfs_cached_blks;
38
39static void vfs_read_inode(struct inode *i);
40static struct dentry *squashfs_get_parent(struct dentry *child);
41static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
42static int squashfs_statfs(struct dentry *, struct kstatfs *);
43static int squashfs_symlink_readpage(struct file *file, struct page *page);
44static long long read_blocklist(struct inode *inode, int index,
45				int readahead_blks, char *block_list,
46				unsigned short **block_p, unsigned int *bsize);
47static int squashfs_readpage(struct file *file, struct page *page);
48static int squashfs_readdir(struct file *, void *, filldir_t);
49static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
50				struct nameidata *);
51static int squashfs_remount(struct super_block *s, int *flags, char *data);
52static void squashfs_put_super(struct super_block *);
53static int squashfs_get_sb(struct file_system_type *,int, const char *, void *,
54				struct vfsmount *);
55static struct inode *squashfs_alloc_inode(struct super_block *sb);
56static void squashfs_destroy_inode(struct inode *inode);
57static int init_inodecache(void);
58static void destroy_inodecache(void);
59
60static struct file_system_type squashfs_fs_type = {
61	.owner = THIS_MODULE,
62	.name = "squashfs",
63	.get_sb = squashfs_get_sb,
64	.kill_sb = kill_block_super,
65	.fs_flags = FS_REQUIRES_DEV
66};
67
68static const unsigned char squashfs_filetype_table[] = {
69	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
70};
71
72static struct super_operations squashfs_super_ops = {
73	.alloc_inode = squashfs_alloc_inode,
74	.destroy_inode = squashfs_destroy_inode,
75	.statfs = squashfs_statfs,
76	.put_super = squashfs_put_super,
77	.remount_fs = squashfs_remount
78};
79
80static struct super_operations squashfs_export_super_ops = {
81	.alloc_inode = squashfs_alloc_inode,
82	.destroy_inode = squashfs_destroy_inode,
83	.statfs = squashfs_statfs,
84	.put_super = squashfs_put_super,
85	.read_inode = vfs_read_inode
86};
87
88static struct export_operations squashfs_export_ops = {
89	.get_parent = squashfs_get_parent
90};
91
92SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
93	.readpage = squashfs_symlink_readpage
94};
95
96SQSH_EXTERN const struct address_space_operations squashfs_aops = {
97	.readpage = squashfs_readpage
98};
99
100static const struct file_operations squashfs_dir_ops = {
101	.read = generic_read_dir,
102	.readdir = squashfs_readdir
103};
104
105SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
106	.lookup = squashfs_lookup
107};
108
109
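/*
 * Read the two byte length field that precedes a compressed metadata block.
 * The field may straddle a device block boundary, and is byte-swapped on
 * opposite-endian filesystems.  If the filesystem was built with check data,
 * the marker byte that follows the length is also verified.  On return
 * *cur_index and *offset point just past the length field.
 */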
110static struct buffer_head *get_block_length(struct super_block *s,
111				int *cur_index, int *offset, int *c_byte)
112{
113	struct squashfs_sb_info *msblk = s->s_fs_info;
114	unsigned short temp;
115	struct buffer_head *bh;
116
117	if (!(bh = sb_bread(s, *cur_index)))
118		goto out;
119
120	if (msblk->devblksize - *offset == 1) {
121		if (msblk->swap)
122			((unsigned char *) &temp)[1] = *((unsigned char *)
123				(bh->b_data + *offset));
124		else
125			((unsigned char *) &temp)[0] = *((unsigned char *)
126				(bh->b_data + *offset));
127		brelse(bh);
128		if (!(bh = sb_bread(s, ++(*cur_index))))
129			goto out;
130		if (msblk->swap)
131			((unsigned char *) &temp)[0] = *((unsigned char *)
132				bh->b_data);
133		else
134			((unsigned char *) &temp)[1] = *((unsigned char *)
135				bh->b_data);
136		*c_byte = temp;
137		*offset = 1;
138	} else {
139		if (msblk->swap) {
140			((unsigned char *) &temp)[1] = *((unsigned char *)
141				(bh->b_data + *offset));
142			((unsigned char *) &temp)[0] = *((unsigned char *)
143				(bh->b_data + *offset + 1));
144		} else {
145			((unsigned char *) &temp)[0] = *((unsigned char *)
146				(bh->b_data + *offset));
147			((unsigned char *) &temp)[1] = *((unsigned char *)
148				(bh->b_data + *offset + 1));
149		}
150		*c_byte = temp;
151		*offset += 2;
152	}
153
154	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
155		if (*offset == msblk->devblksize) {
156			brelse(bh);
157			if (!(bh = sb_bread(s, ++(*cur_index))))
158				goto out;
159			*offset = 0;
160		}
161		if (*((unsigned char *) (bh->b_data + *offset)) !=
162						SQUASHFS_MARKER_BYTE) {
163			ERROR("Metadata block marker corrupt @ %x\n",
164						*cur_index);
165			brelse(bh);
166			goto out;
167		}
168		(*offset)++;
169	}
170	return bh;
171
172out:
173	return NULL;
174}
175
176
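/*
 * Read and, if necessary, decompress a block from the underlying block
 * device into buffer.  For data blocks the compressed length is encoded in
 * the length argument; for metadata blocks (length == 0) it is read from the
 * two byte field in front of the block via get_block_length().  Returns the
 * number of bytes placed in buffer, or 0 on failure.
 */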
177SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
178			long long index, unsigned int length,
179			long long *next_index, int srclength)
180{
181	struct squashfs_sb_info *msblk = s->s_fs_info;
182	struct squashfs_super_block *sblk = &msblk->sblk;
183	struct buffer_head **bh;
184	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
185	unsigned int cur_index = index >> msblk->devblksize_log2;
186	int bytes, avail_bytes, b = 0, k = 0;
187	unsigned int compressed;
188	unsigned int c_byte = length;
189
190	bh = kmalloc(((sblk->block_size >> msblk->devblksize_log2) + 1) *
191								sizeof(struct buffer_head *), GFP_KERNEL);
192	if (bh == NULL)
193		goto read_failure;
194
195	if (c_byte) {
196		bytes = msblk->devblksize - offset;
197		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
198		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
199
200		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index,
201					compressed ? "" : "un", (unsigned int) c_byte, srclength);
202
203		if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
204			goto read_failure;
205
206		bh[0] = sb_getblk(s, cur_index);
207		if (bh[0] == NULL)
208			goto block_release;
209
210		for (b = 1; bytes < c_byte; b++) {
211			bh[b] = sb_getblk(s, ++cur_index);
212			if (bh[b] == NULL)
213				goto block_release;
214			bytes += msblk->devblksize;
215		}
216		ll_rw_block(READ, b, bh);
217	} else {
218		if (index < 0 || (index + 2) > sblk->bytes_used)
219			goto read_failure;
220
221		bh[0] = get_block_length(s, &cur_index, &offset, &c_byte);
222		if (bh[0] == NULL)
223			goto read_failure;
224
225		bytes = msblk->devblksize - offset;
226		compressed = SQUASHFS_COMPRESSED(c_byte);
227		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
228
229		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
230					? "" : "un", (unsigned int) c_byte);
231
232		if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
233			goto read_failure;
234
235		for (b = 1; bytes < c_byte; b++) {
236			bh[b] = sb_getblk(s, ++cur_index);
237			if (bh[b] == NULL)
238				goto block_release;
239			bytes += msblk->devblksize;
240		}
241		ll_rw_block(READ, b - 1, bh + 1);
242	}
243
244	if (compressed) {
245		int zlib_err = 0;
246
		/*
		 * uncompress block
		 */
250
251		mutex_lock(&msblk->read_data_mutex);
252
253		msblk->stream.next_out = buffer;
254		msblk->stream.avail_out = srclength;
255
256		for (bytes = 0; k < b; k++) {
257			avail_bytes = min(c_byte - bytes, msblk->devblksize - offset);
258
259			wait_on_buffer(bh[k]);
260			if (!buffer_uptodate(bh[k]))
261				goto release_mutex;
262
263			msblk->stream.next_in = bh[k]->b_data + offset;
264			msblk->stream.avail_in = avail_bytes;
265
266			if (k == 0) {
267				zlib_err = zlib_inflateInit(&msblk->stream);
268				if (zlib_err != Z_OK) {
269					ERROR("zlib_inflateInit returned unexpected result 0x%x,"
270						" srclength %d\n", zlib_err, srclength);
271					goto release_mutex;
272				}
273
274				if (avail_bytes == 0) {
275					offset = 0;
276					brelse(bh[k]);
277					continue;
278				}
279			}
280
281			zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
282			if (zlib_err != Z_OK && zlib_err != Z_STREAM_END) {
283				ERROR("zlib_inflate returned unexpected result 0x%x,"
284					" srclength %d, avail_in %d, avail_out %d\n", zlib_err,
285					srclength, msblk->stream.avail_in, msblk->stream.avail_out);
286				goto release_mutex;
287			}
288
289			bytes += avail_bytes;
290			offset = 0;
291			brelse(bh[k]);
292		}
293
294		if (zlib_err != Z_STREAM_END)
295			goto release_mutex;
296
297		zlib_err = zlib_inflateEnd(&msblk->stream);
298		if (zlib_err != Z_OK) {
299			ERROR("zlib_inflateEnd returned unexpected result 0x%x,"
300				" srclength %d\n", zlib_err, srclength);
301			goto release_mutex;
302		}
303		bytes = msblk->stream.total_out;
304		mutex_unlock(&msblk->read_data_mutex);
305	} else {
306		int i;
307
308		for(i = 0; i < b; i++) {
309			wait_on_buffer(bh[i]);
310			if (!buffer_uptodate(bh[i]))
311				goto block_release;
312		}
313
314		for (bytes = 0; k < b; k++) {
315			avail_bytes = min(c_byte - bytes, msblk->devblksize - offset);
316
317			memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes);
318			bytes += avail_bytes;
319			offset = 0;
320			brelse(bh[k]);
321		}
322	}
323
324	if (next_index)
325		*next_index = index + c_byte + (length ? 0 :
326				(SQUASHFS_CHECK_DATA(msblk->sblk.flags) ? 3 : 2));
327
328	kfree(bh);
329	return bytes;
330
331release_mutex:
332	mutex_unlock(&msblk->read_data_mutex);
333
334block_release:
335	for (; k < b; k++)
336		brelse(bh[k]);
337
338read_failure:
339	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
340	kfree(bh);
341	return 0;
342}
343
344
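/*
 * Copy length bytes of metadata starting at [block:offset] into buffer,
 * reading through the metadata block cache and following on to the next
 * metadata block where necessary.  If buffer is NULL the data is simply
 * skipped over.  The position immediately after the request is returned in
 * *next_block/*next_offset.  Returns length on success, 0 on failure.
 */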
345SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, void *buffer,
346				long long block, unsigned int offset,
347				int length, long long *next_block,
348				unsigned int *next_offset)
349{
350	struct squashfs_sb_info *msblk = s->s_fs_info;
351	int n, i, bytes, return_length = length;
352	long long next_index;
353
354	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
355
356	while (1) {
357		for (i = 0; i < squashfs_cached_blks; i++)
358			if (msblk->block_cache[i].block == block)
359				break;
360
361		mutex_lock(&msblk->block_cache_mutex);
362
363		if (i == squashfs_cached_blks) {
364			/* read inode header block */
365			if (msblk->unused_cache_blks == 0) {
366				mutex_unlock(&msblk->block_cache_mutex);
367				wait_event(msblk->waitq, msblk->unused_cache_blks);
368				continue;
369			}
370
371			i = msblk->next_cache;
372			for (n = 0; n < squashfs_cached_blks; n++) {
373				if (msblk->block_cache[i].block != SQUASHFS_USED_BLK)
374					break;
375				i = (i + 1) % squashfs_cached_blks;
376			}
377
378			msblk->next_cache = (i + 1) % squashfs_cached_blks;
379
380			if (msblk->block_cache[i].block == SQUASHFS_INVALID_BLK) {
381				msblk->block_cache[i].data = vmalloc(SQUASHFS_METADATA_SIZE);
382				if (msblk->block_cache[i].data == NULL) {
383					ERROR("Failed to allocate cache block\n");
384					mutex_unlock(&msblk->block_cache_mutex);
385					goto out;
386				}
387			}
388
389			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
390			msblk->unused_cache_blks --;
391			mutex_unlock(&msblk->block_cache_mutex);
392
393			msblk->block_cache[i].length = squashfs_read_data(s,
394				msblk->block_cache[i].data, block, 0, &next_index,
395				SQUASHFS_METADATA_SIZE);
396
397			if (msblk->block_cache[i].length == 0) {
398				ERROR("Unable to read cache block [%llx:%x]\n", block, offset);
399				mutex_lock(&msblk->block_cache_mutex);
400				msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
401				msblk->unused_cache_blks ++;
402				smp_mb();
403				vfree(msblk->block_cache[i].data);
404				wake_up(&msblk->waitq);
405				mutex_unlock(&msblk->block_cache_mutex);
406				goto out;
407			}
408
409			mutex_lock(&msblk->block_cache_mutex);
410			msblk->block_cache[i].block = block;
411			msblk->block_cache[i].next_index = next_index;
412			msblk->unused_cache_blks ++;
413			smp_mb();
414			wake_up(&msblk->waitq);
415			TRACE("Read cache block [%llx:%x]\n", block, offset);
416		}
417
418		if (msblk->block_cache[i].block != block) {
419			mutex_unlock(&msblk->block_cache_mutex);
420			continue;
421		}
422
423		bytes = msblk->block_cache[i].length - offset;
424
425		if (bytes < 1) {
426			mutex_unlock(&msblk->block_cache_mutex);
427			goto out;
428		} else if (bytes >= length) {
429			if (buffer)
430				memcpy(buffer, msblk->block_cache[i].data + offset, length);
431			if (msblk->block_cache[i].length - offset == length) {
432				*next_block = msblk->block_cache[i].next_index;
433				*next_offset = 0;
434			} else {
435				*next_block = block;
436				*next_offset = offset + length;
437			}
438			mutex_unlock(&msblk->block_cache_mutex);
439			goto finish;
440		} else {
441			if (buffer) {
442				memcpy(buffer, msblk->block_cache[i].data + offset, bytes);
443				buffer = (char *) buffer + bytes;
444			}
445			block = msblk->block_cache[i].next_index;
446			mutex_unlock(&msblk->block_cache_mutex);
447			length -= bytes;
448			offset = 0;
449		}
450	}
451
452finish:
453	return return_length;
454out:
455	return 0;
456}
457
458
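/*
 * Look up a fragment's start block and compressed size in the fragment
 * table, using the in-memory fragment index to locate the right metadata
 * block.  Returns 1 on success, 0 on failure.
 */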
459static int get_fragment_location(struct super_block *s, unsigned int fragment,
460				long long *fragment_start_block,
461				unsigned int *fragment_size)
462{
463	struct squashfs_sb_info *msblk = s->s_fs_info;
464	long long start_block =
465		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
466	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
467	struct squashfs_fragment_entry fragment_entry;
468
469	if (msblk->swap) {
470		struct squashfs_fragment_entry sfragment_entry;
471
472		if (!squashfs_get_cached_block(s, &sfragment_entry, start_block, offset,
473					 sizeof(sfragment_entry), &start_block, &offset))
474			goto out;
475		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
476	} else
477		if (!squashfs_get_cached_block(s, &fragment_entry, start_block, offset,
478					 sizeof(fragment_entry), &start_block, &offset))
479			goto out;
480
481	*fragment_start_block = fragment_entry.start_block;
482	*fragment_size = fragment_entry.size;
483
484	return 1;
485
486out:
487	return 0;
488}
489
490
491SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk,
492				struct squashfs_fragment_cache *fragment)
493{
494	mutex_lock(&msblk->fragment_mutex);
495	fragment->locked --;
496	if (fragment->locked == 0) {
497		msblk->unused_frag_blks ++;
498		smp_mb();
499		wake_up(&msblk->fragment_wait_queue);
500	}
501	mutex_unlock(&msblk->fragment_mutex);
502}
503
504
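/*
 * Return a locked entry from the fragment cache for the fragment block at
 * start_block, reading and decompressing it on a cache miss.  Concurrent
 * readers of the same fragment wait on the entry's wait queue until the
 * read completes.  Release the entry with release_cached_fragment().
 */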
505SQSH_EXTERN
506struct squashfs_fragment_cache *get_cached_fragment(struct super_block *s,
507				long long start_block, int length)
508{
509	int i, n;
510	struct squashfs_sb_info *msblk = s->s_fs_info;
511	struct squashfs_super_block *sblk = &msblk->sblk;
512
513	while (1) {
514		mutex_lock(&msblk->fragment_mutex);
515
516		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
517				msblk->fragment[i].block != start_block; i++);
518
519		if (i == SQUASHFS_CACHED_FRAGMENTS) {
520			if (msblk->unused_frag_blks == 0) {
521				mutex_unlock(&msblk->fragment_mutex);
522				wait_event(msblk->fragment_wait_queue, msblk->unused_frag_blks);
523				continue;
524			}
525
526			i = msblk->next_fragment;
527			for (n = 0; n < SQUASHFS_CACHED_FRAGMENTS; n++) {
528				if (msblk->fragment[i].locked == 0)
529					break;
530				i = (i + 1) % SQUASHFS_CACHED_FRAGMENTS;
531			}
532
533			msblk->next_fragment = (msblk->next_fragment + 1) %
534				SQUASHFS_CACHED_FRAGMENTS;
535
536			if (msblk->fragment[i].data == NULL) {
537				msblk->fragment[i].data = vmalloc(sblk->block_size);
538				if (msblk->fragment[i].data == NULL) {
539					ERROR("Failed to allocate fragment cache block\n");
540					mutex_unlock(&msblk->fragment_mutex);
541					goto out;
542				}
543			}
544
545			msblk->unused_frag_blks --;
546			msblk->fragment[i].block = start_block;
547			msblk->fragment[i].locked = 1;
548			msblk->fragment[i].pending = 1;
549			msblk->fragment[i].error = 0;
550			mutex_unlock(&msblk->fragment_mutex);
551
552			msblk->fragment[i].length = squashfs_read_data(s,
553				msblk->fragment[i].data, start_block, length, NULL,
554				sblk->block_size);
555
556			mutex_lock(&msblk->fragment_mutex);
557
558			if (msblk->fragment[i].length == 0) {
559				ERROR("Unable to read fragment cache block [%llx]\n", start_block);
560				msblk->fragment[i].error = 1;
561			}
562
563			TRACE("New fragment %d, start block %lld, locked %d\n",
564				i, msblk->fragment[i].block, msblk->fragment[i].locked);
565
566			msblk->fragment[i].pending = 0;
567			smp_mb();
568			wake_up_all(&msblk->fragment[i].wait_queue);
569			mutex_unlock(&msblk->fragment_mutex);
570			break;
571		}
572
573		if (msblk->fragment[i].locked == 0)
574			msblk->unused_frag_blks --;
575		msblk->fragment[i].locked++;
576
577		if (msblk->fragment[i].pending) {
578			mutex_unlock(&msblk->fragment_mutex);
579			wait_event(msblk->fragment[i].wait_queue, !msblk->fragment[i].pending);
580		} else
581			mutex_unlock(&msblk->fragment_mutex);
582		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
583			msblk->fragment[i].block, msblk->fragment[i].locked);
584		break;
585	}
586
587	return &msblk->fragment[i];
588
589out:
590	return NULL;
591}
592
593
594static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
595				struct squashfs_base_inode_header *inodeb)
596{
597	i->i_ino = inodeb->inode_number;
598	i->i_mtime.tv_sec = inodeb->mtime;
599	i->i_atime.tv_sec = inodeb->mtime;
600	i->i_ctime.tv_sec = inodeb->mtime;
601	i->i_uid = msblk->uid[inodeb->uid];
602	i->i_mode = inodeb->mode;
603	i->i_size = 0;
604
605	if (inodeb->guid == SQUASHFS_GUIDS)
606		i->i_gid = i->i_uid;
607	else
608		i->i_gid = msblk->guid[inodeb->guid];
609}
610
611
612static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino)
613{
614	struct squashfs_sb_info *msblk = s->s_fs_info;
615	long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
616	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1);
617	squashfs_inode_t inode;
618
619	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino);
620
621	if (msblk->swap) {
622		squashfs_inode_t sinode;
623
624		if (!squashfs_get_cached_block(s, &sinode, start, offset,
625					sizeof(sinode), &start, &offset))
626			goto out;
627		SQUASHFS_SWAP_INODE_T((&inode), &sinode);
628	} else if (!squashfs_get_cached_block(s, &inode, start, offset,
629					sizeof(inode), &start, &offset))
630			goto out;
631
632	TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode);
633
634	return inode;
635
636out:
637	return SQUASHFS_INVALID_BLK;
638}
639
640
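/*
 * ->read_inode operation used when the filesystem is exportable.  The inode
 * number is mapped back to its on-disk location via the inode lookup table,
 * and the inode is then filled in by the version specific read_inode.
 */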
641static void vfs_read_inode(struct inode *i)
642{
643	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
644	squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino);
645
646	TRACE("Entered vfs_read_inode\n");
647
648	if(inode != SQUASHFS_INVALID_BLK)
649		(msblk->read_inode)(i, inode);
650}
651
652
653static struct dentry *squashfs_get_parent(struct dentry *child)
654{
655	struct inode *i = child->d_inode;
656	struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode);
657	struct dentry *rv;
658
659	TRACE("Entered squashfs_get_parent\n");
660
661	if(parent == NULL) {
662		rv = ERR_PTR(-EACCES);
663		goto out;
664	}
665
666	rv = d_alloc_anon(parent);
667	if(rv == NULL)
668		rv = ERR_PTR(-ENOMEM);
669
670out:
671	return rv;
672}
673
674
675SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s,
676				squashfs_inode_t inode, unsigned int inode_number)
677{
678	struct squashfs_sb_info *msblk = s->s_fs_info;
679	struct inode *i = iget_locked(s, inode_number);
680
681	TRACE("Entered squashfs_iget\n");
682
683	if(i && (i->i_state & I_NEW)) {
684		(msblk->read_inode)(i, inode);
685		unlock_new_inode(i);
686	}
687
688	return i;
689}
690
691
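/*
 * Fill in a VFS inode from the on-disk inode at the metadata position packed
 * into the squashfs_inode_t cookie.  The base header is read first to
 * determine the inode type, then the type specific header.  Returns 1 on
 * success, 0 on failure (in which case the inode is marked bad).
 */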
692static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
693{
694	struct super_block *s = i->i_sb;
695	struct squashfs_sb_info *msblk = s->s_fs_info;
696	struct squashfs_super_block *sblk = &msblk->sblk;
697	long long block = SQUASHFS_INODE_BLK(inode) + sblk->inode_table_start;
698	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
699	long long next_block;
700	unsigned int next_offset;
701	union squashfs_inode_header id, sid;
702	struct squashfs_base_inode_header *inodeb = &id.base, *sinodeb = &sid.base;
703
704	TRACE("Entered squashfs_read_inode\n");
705
706	if (msblk->swap) {
707		if (!squashfs_get_cached_block(s, sinodeb, block, offset,
708					sizeof(*sinodeb), &next_block, &next_offset))
709			goto failed_read;
710		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb, sizeof(*sinodeb));
711	} else
712		if (!squashfs_get_cached_block(s, inodeb, block, offset,
713					sizeof(*inodeb), &next_block, &next_offset))
714			goto failed_read;
715
716	squashfs_new_inode(msblk, i, inodeb);
717
718	switch(inodeb->inode_type) {
719		case SQUASHFS_FILE_TYPE: {
720			unsigned int frag_size;
721			long long frag_blk;
722			struct squashfs_reg_inode_header *inodep = &id.reg;
723			struct squashfs_reg_inode_header *sinodep = &sid.reg;
724
725			if (msblk->swap) {
726				if (!squashfs_get_cached_block(s, sinodep, block, offset,
727						sizeof(*sinodep), &next_block, &next_offset))
728					goto failed_read;
729				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
730			} else
731				if (!squashfs_get_cached_block(s, inodep, block, offset,
732						sizeof(*inodep), &next_block, &next_offset))
733					goto failed_read;
734
735			frag_blk = SQUASHFS_INVALID_BLK;
736
			if (inodep->fragment != SQUASHFS_INVALID_FRAG)
				if (!get_fragment_location(s, inodep->fragment, &frag_blk,
												&frag_size))
					goto failed_read;
741
742			i->i_nlink = 1;
743			i->i_size = inodep->file_size;
744			i->i_fop = &generic_ro_fops;
745			i->i_mode |= S_IFREG;
746			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
747			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
748			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
749			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
750			SQUASHFS_I(i)->start_block = inodep->start_block;
751			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
752			SQUASHFS_I(i)->offset = next_offset;
753			i->i_data.a_ops = &squashfs_aops;
754
755			TRACE("File inode %x:%x, start_block %llx, "
756					"block_list_start %llx, offset %x\n",
757					SQUASHFS_INODE_BLK(inode), offset,
758					inodep->start_block, next_block,
759					next_offset);
760			break;
761		}
762		case SQUASHFS_LREG_TYPE: {
763			unsigned int frag_size;
764			long long frag_blk;
765			struct squashfs_lreg_inode_header *inodep = &id.lreg;
766			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
767
768			if (msblk->swap) {
769				if (!squashfs_get_cached_block(s, sinodep, block, offset,
770						sizeof(*sinodep), &next_block, &next_offset))
771					goto failed_read;
772				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
773			} else
774				if (!squashfs_get_cached_block(s, inodep, block, offset,
775						sizeof(*inodep), &next_block, &next_offset))
776					goto failed_read;
777
778			frag_blk = SQUASHFS_INVALID_BLK;
779
780			if (inodep->fragment != SQUASHFS_INVALID_FRAG)
781				if (!get_fragment_location(s, inodep->fragment, &frag_blk,
782												 &frag_size))
783					goto failed_read;
784
785			i->i_nlink = inodep->nlink;
786			i->i_size = inodep->file_size;
787			i->i_fop = &generic_ro_fops;
788			i->i_mode |= S_IFREG;
789			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
790			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
791			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
792			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
793			SQUASHFS_I(i)->start_block = inodep->start_block;
794			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
795			SQUASHFS_I(i)->offset = next_offset;
796			i->i_data.a_ops = &squashfs_aops;
797
798			TRACE("File inode %x:%x, start_block %llx, "
799					"block_list_start %llx, offset %x\n",
800					SQUASHFS_INODE_BLK(inode), offset,
801					inodep->start_block, next_block,
802					next_offset);
803			break;
804		}
805		case SQUASHFS_DIR_TYPE: {
806			struct squashfs_dir_inode_header *inodep = &id.dir;
807			struct squashfs_dir_inode_header *sinodep = &sid.dir;
808
809			if (msblk->swap) {
810				if (!squashfs_get_cached_block(s, sinodep, block, offset,
811						sizeof(*sinodep), &next_block, &next_offset))
812					goto failed_read;
813				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
814			} else
815				if (!squashfs_get_cached_block(s, inodep, block, offset,
816						sizeof(*inodep), &next_block, &next_offset))
817					goto failed_read;
818
819			i->i_nlink = inodep->nlink;
820			i->i_size = inodep->file_size;
821			i->i_op = &squashfs_dir_inode_ops;
822			i->i_fop = &squashfs_dir_ops;
823			i->i_mode |= S_IFDIR;
824			SQUASHFS_I(i)->start_block = inodep->start_block;
825			SQUASHFS_I(i)->offset = inodep->offset;
826			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
827			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
828
829			TRACE("Directory inode %x:%x, start_block %x, offset "
830					"%x\n", SQUASHFS_INODE_BLK(inode),
831					offset, inodep->start_block,
832					inodep->offset);
833			break;
834		}
835		case SQUASHFS_LDIR_TYPE: {
836			struct squashfs_ldir_inode_header *inodep = &id.ldir;
837			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
838
839			if (msblk->swap) {
840				if (!squashfs_get_cached_block(s, sinodep, block, offset,
841						sizeof(*sinodep), &next_block, &next_offset))
842					goto failed_read;
843				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep, sinodep);
844			} else
845				if (!squashfs_get_cached_block(s, inodep, block, offset,
846						sizeof(*inodep), &next_block, &next_offset))
847					goto failed_read;
848
849			i->i_nlink = inodep->nlink;
850			i->i_size = inodep->file_size;
851			i->i_op = &squashfs_dir_inode_ops;
852			i->i_fop = &squashfs_dir_ops;
853			i->i_mode |= S_IFDIR;
854			SQUASHFS_I(i)->start_block = inodep->start_block;
855			SQUASHFS_I(i)->offset = inodep->offset;
856			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
857			SQUASHFS_I(i)->u.s2.directory_index_offset = next_offset;
858			SQUASHFS_I(i)->u.s2.directory_index_count = inodep->i_count;
859			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
860
861			TRACE("Long directory inode %x:%x, start_block %x, offset %x\n",
862					SQUASHFS_INODE_BLK(inode), offset,
863					inodep->start_block, inodep->offset);
864			break;
865		}
866		case SQUASHFS_SYMLINK_TYPE: {
867			struct squashfs_symlink_inode_header *inodep = &id.symlink;
868			struct squashfs_symlink_inode_header *sinodep = &sid.symlink;
869
870			if (msblk->swap) {
871				if (!squashfs_get_cached_block(s, sinodep, block, offset,
872						sizeof(*sinodep), &next_block, &next_offset))
873					goto failed_read;
874				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep, sinodep);
875			} else
876				if (!squashfs_get_cached_block(s, inodep, block, offset,
877						sizeof(*inodep), &next_block, &next_offset))
878					goto failed_read;
879
880			i->i_nlink = inodep->nlink;
881			i->i_size = inodep->symlink_size;
882			i->i_op = &page_symlink_inode_operations;
883			i->i_data.a_ops = &squashfs_symlink_aops;
884			i->i_mode |= S_IFLNK;
885			SQUASHFS_I(i)->start_block = next_block;
886			SQUASHFS_I(i)->offset = next_offset;
887
888			TRACE("Symbolic link inode %x:%x, start_block %llx, offset %x\n",
889					SQUASHFS_INODE_BLK(inode), offset,
890					next_block, next_offset);
891			break;
892		 }
893		 case SQUASHFS_BLKDEV_TYPE:
894		 case SQUASHFS_CHRDEV_TYPE: {
895			struct squashfs_dev_inode_header *inodep = &id.dev;
896			struct squashfs_dev_inode_header *sinodep = &sid.dev;
897
898			if (msblk->swap) {
899				if (!squashfs_get_cached_block(s, sinodep, block, offset,
900						sizeof(*sinodep), &next_block, &next_offset))
901					goto failed_read;
902				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
903			} else
904				if (!squashfs_get_cached_block(s, inodep, block, offset,
905						sizeof(*inodep), &next_block, &next_offset))
906					goto failed_read;
907
908			i->i_nlink = inodep->nlink;
909			i->i_mode |= (inodeb->inode_type == SQUASHFS_CHRDEV_TYPE) ?
910					S_IFCHR : S_IFBLK;
911			init_special_inode(i, i->i_mode, old_decode_dev(inodep->rdev));
912
913			TRACE("Device inode %x:%x, rdev %x\n",
914					SQUASHFS_INODE_BLK(inode), offset, inodep->rdev);
915			break;
916		 }
917		 case SQUASHFS_FIFO_TYPE:
918		 case SQUASHFS_SOCKET_TYPE: {
919			struct squashfs_ipc_inode_header *inodep = &id.ipc;
920			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
921
922			if (msblk->swap) {
923				if (!squashfs_get_cached_block(s, sinodep, block, offset,
924						sizeof(*sinodep), &next_block, &next_offset))
925					goto failed_read;
926				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
927			} else
928				if (!squashfs_get_cached_block(s, inodep, block, offset,
929						sizeof(*inodep), &next_block, &next_offset))
930					goto failed_read;
931
932			i->i_nlink = inodep->nlink;
933			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
934							? S_IFIFO : S_IFSOCK;
935			init_special_inode(i, i->i_mode, 0);
936			break;
937		 }
938		 default:
939			ERROR("Unknown inode type %d in squashfs_iget!\n",
940					inodeb->inode_type);
941			goto failed_read1;
942	}
943
944	return 1;
945
946failed_read:
947	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
948
949failed_read1:
950	make_bad_inode(i);
951	return 0;
952}
953
954
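/*
 * Read the inode number to on-disk inode lookup table into memory.  It is
 * only read for 3.x filesystems that contain a lookup table, and is what
 * allows exported (NFS) file handles to be mapped back to inodes.
 */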
955static int read_inode_lookup_table(struct super_block *s)
956{
957	struct squashfs_sb_info *msblk = s->s_fs_info;
958	struct squashfs_super_block *sblk = &msblk->sblk;
959	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);
960
961	TRACE("In read_inode_lookup_table, length %d\n", length);
962
963	/* Allocate inode lookup table */
964	msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL);
965	if (msblk->inode_lookup_table == NULL) {
966		ERROR("Failed to allocate inode lookup table\n");
967		return 0;
968	}
969
970	if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
971			sblk->lookup_table_start, length |
972			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
973		ERROR("unable to read inode lookup table\n");
974		return 0;
975	}
976
977	if (msblk->swap) {
978		int i;
979		long long block;
980
981		for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
			/* byte-swap this lookup table entry into CPU order */
983			SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
984						&msblk->inode_lookup_table[i], 1);
985			msblk->inode_lookup_table[i] = block;
986		}
987	}
988
989	return 1;
990}
991
992
993static int read_fragment_index_table(struct super_block *s)
994{
995	struct squashfs_sb_info *msblk = s->s_fs_info;
996	struct squashfs_super_block *sblk = &msblk->sblk;
997	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
998
999	if(length == 0)
1000		return 1;
1001
1002	/* Allocate fragment index table */
1003	msblk->fragment_index = kmalloc(length, GFP_KERNEL);
1004	if (msblk->fragment_index == NULL) {
1005		ERROR("Failed to allocate fragment index table\n");
1006		return 0;
1007	}
1008
1009	if (!squashfs_read_data(s, (char *) msblk->fragment_index,
1010			sblk->fragment_table_start, length |
1011			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
1012		ERROR("unable to read fragment index table\n");
1013		return 0;
1014	}
1015
1016	if (msblk->swap) {
1017		int i;
1018		long long fragment;
1019
1020		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
			/* byte-swap this fragment index entry into CPU order */
1022			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
1023						&msblk->fragment_index[i], 1);
1024			msblk->fragment_index[i] = fragment;
1025		}
1026	}
1027
1028	return 1;
1029}
1030
1031
1032static int readahead_metadata(struct super_block *s)
1033{
1034	struct squashfs_sb_info *msblk = s->s_fs_info;
1035	int i;
1036
1037	squashfs_cached_blks = SQUASHFS_CACHED_BLKS;
1038
1039	/* Init inode_table block pointer array */
1040	msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1041					squashfs_cached_blks, GFP_KERNEL);
1042	if (msblk->block_cache == NULL) {
1043		ERROR("Failed to allocate block cache\n");
1044		goto failed;
1045	}
1046
1047	for (i = 0; i < squashfs_cached_blks; i++)
1048		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1049
1050	msblk->next_cache = 0;
1051	msblk->unused_cache_blks = squashfs_cached_blks;
1052
1053	return 1;
1054
1055failed:
1056	return 0;
1057}
1058
1059
1060static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
1061{
1062	struct squashfs_super_block *sblk = &msblk->sblk;
1063
1064	msblk->read_inode = squashfs_read_inode;
1065	msblk->read_blocklist = read_blocklist;
1066	msblk->read_fragment_index_table = read_fragment_index_table;
1067
1068	if (sblk->s_major == 1) {
1069		if (!squashfs_1_0_supported(msblk)) {
1070			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
1071				"are unsupported\n");
1072			SERROR("Please recompile with Squashfs 1.0 support enabled\n");
1073			return 0;
1074		}
1075	} else if (sblk->s_major == 2) {
1076		if (!squashfs_2_0_supported(msblk)) {
1077			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
1078				"are unsupported\n");
1079			SERROR("Please recompile with Squashfs 2.0 support enabled\n");
1080			return 0;
1081		}
1082	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
1083			SQUASHFS_MINOR) {
1084		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
1085				"filesystem\n", sblk->s_major, sblk->s_minor);
1086		SERROR("Please update your kernel\n");
1087		return 0;
1088	}
1089
1090	return 1;
1091}
1092
1093
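/*
 * Read and validate the superblock, set up the metadata and fragment
 * caches, read the uid/gid, fragment index and inode lookup tables, and
 * finally create the root inode and dentry.
 */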
1094static int squashfs_fill_super(struct super_block *s, void *data, int silent)
1095{
1096	struct squashfs_sb_info *msblk;
1097	struct squashfs_super_block *sblk;
1098	int i;
1099	char b[BDEVNAME_SIZE];
1100	struct inode *root;
1101
1102	TRACE("Entered squashfs_fill_superblock\n");
1103
1104	s->s_fs_info = kzalloc(sizeof(struct squashfs_sb_info), GFP_KERNEL);
1105	if (s->s_fs_info == NULL) {
1106		ERROR("Failed to allocate superblock\n");
1107		goto failure;
1108	}
1109	msblk = s->s_fs_info;
1110
1111	msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize());
1112	if (msblk->stream.workspace == NULL) {
1113		ERROR("Failed to allocate zlib workspace\n");
1114		goto failure;
1115	}
1116	sblk = &msblk->sblk;
1117
1118	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
1119	msblk->devblksize_log2 = ffz(~msblk->devblksize);
1120
1121	mutex_init(&msblk->read_data_mutex);
1122	mutex_init(&msblk->read_page_mutex);
1123	mutex_init(&msblk->block_cache_mutex);
1124	mutex_init(&msblk->fragment_mutex);
1125	mutex_init(&msblk->meta_index_mutex);
1126
1127	init_waitqueue_head(&msblk->waitq);
1128	init_waitqueue_head(&msblk->fragment_wait_queue);
1129
	/* sblk->bytes_used is checked in squashfs_read_data to ensure reads are not
	 * beyond filesystem end.  As we're using squashfs_read_data to read sblk here,
	 * first set sblk->bytes_used to a useful value */
1133	sblk->bytes_used = sizeof(struct squashfs_super_block);
1134	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
1135					sizeof(struct squashfs_super_block) |
1136					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) {
1137		SERROR("unable to read superblock\n");
1138		goto failed_mount;
1139	}
1140
1141	/* Check it is a SQUASHFS superblock */
1142	if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
1143		if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
1144			struct squashfs_super_block ssblk;
1145
1146			WARNING("Mounting a different endian SQUASHFS filesystem on %s\n",
1147				bdevname(s->s_bdev, b));
1148
1149			SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
1150			memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
1151			msblk->swap = 1;
1152		} else  {
1153			SERROR("Can't find a SQUASHFS superblock on %s\n",
1154							bdevname(s->s_bdev, b));
1155			goto failed_mount;
1156		}
1157	}
1158
1159	/* Check the MAJOR & MINOR versions */
1160	if(!supported_squashfs_filesystem(msblk, silent))
1161		goto failed_mount;
1162
1163	/* Check the filesystem does not extend beyond the end of the
1164	   block device */
1165	if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
1166		goto failed_mount;
1167
1168	/* Check the root inode for sanity */
1169	if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
1170		goto failed_mount;
1171
1172	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
1173	TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(sblk->flags)
1174					? "un" : "");
1175	TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
1176					? "un" : "");
1177	TRACE("Check data is %spresent in the filesystem\n",
1178					SQUASHFS_CHECK_DATA(sblk->flags) ?  "" : "not ");
1179	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
1180	TRACE("Block size %d\n", sblk->block_size);
1181	TRACE("Number of inodes %d\n", sblk->inodes);
1182	if (sblk->s_major > 1)
1183		TRACE("Number of fragments %d\n", sblk->fragments);
1184	TRACE("Number of uids %d\n", sblk->no_uids);
1185	TRACE("Number of gids %d\n", sblk->no_guids);
1186	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
1187	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
1188	if (sblk->s_major > 1)
1189		TRACE("sblk->fragment_table_start %llx\n", sblk->fragment_table_start);
1190	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
1191
1192	s->s_maxbytes = MAX_LFS_FILESIZE;
1193	s->s_flags |= MS_RDONLY;
1194	s->s_op = &squashfs_super_ops;
1195
1196	if (readahead_metadata(s) == 0)
1197		goto failed_mount;
1198
1199	/* Allocate read_page block */
1200	msblk->read_page = vmalloc(sblk->block_size);
1201	if (msblk->read_page == NULL) {
1202		ERROR("Failed to allocate read_page block\n");
1203		goto failed_mount;
1204	}
1205
1206	/* Allocate uid and gid tables */
1207	msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1208					sizeof(unsigned int), GFP_KERNEL);
1209	if (msblk->uid == NULL) {
1210		ERROR("Failed to allocate uid/gid table\n");
1211		goto failed_mount;
1212	}
1213	msblk->guid = msblk->uid + sblk->no_uids;
1214
1215	if (msblk->swap) {
1216		unsigned int suid[sblk->no_uids + sblk->no_guids];
1217
1218		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
1219					((sblk->no_uids + sblk->no_guids) *
1220					 sizeof(unsigned int)) |
1221					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1222			ERROR("unable to read uid/gid table\n");
1223			goto failed_mount;
1224		}
1225
1226		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1227			sblk->no_guids), (sizeof(unsigned int) * 8));
1228	} else
1229		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1230					((sblk->no_uids + sblk->no_guids) *
1231					 sizeof(unsigned int)) |
1232					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1233			ERROR("unable to read uid/gid table\n");
1234			goto failed_mount;
1235		}
1236
1237
1238	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1239		goto allocate_root;
1240
1241	msblk->fragment = kzalloc(sizeof(struct squashfs_fragment_cache) *
1242				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL);
1243	if (msblk->fragment == NULL) {
1244		ERROR("Failed to allocate fragment block cache\n");
1245		goto failed_mount;
1246	}
1247
1248	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
1249		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1250		init_waitqueue_head(&msblk->fragment[i].wait_queue);
1251	}
1252
1253	msblk->next_fragment = 0;
1254	msblk->unused_frag_blks = SQUASHFS_CACHED_FRAGMENTS;
1255
1256	/* Allocate and read fragment index table */
1257	if (msblk->read_fragment_index_table(s) == 0)
1258		goto failed_mount;
1259
1260	if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK)
1261		goto allocate_root;
1262
1263	/* Allocate and read inode lookup table */
1264	if (read_inode_lookup_table(s) == 0)
1265		goto failed_mount;
1266
1267	s->s_op = &squashfs_export_super_ops;
1268	s->s_export_op = &squashfs_export_ops;
1269
1270allocate_root:
1271	root = new_inode(s);
1272	if ((msblk->read_inode)(root, sblk->root_inode) == 0)
1273		goto failed_mount;
1274	insert_inode_hash(root);
1275
1276	s->s_root = d_alloc_root(root);
1277	if (s->s_root == NULL) {
1278		ERROR("Root inode create failed\n");
1279		iput(root);
1280		goto failed_mount;
1281	}
1282
1283	TRACE("Leaving squashfs_fill_super\n");
1284	return 0;
1285
1286failed_mount:
1287	kfree(msblk->inode_lookup_table);
1288	kfree(msblk->fragment_index);
1289	kfree(msblk->fragment);
1290	kfree(msblk->uid);
1291	vfree(msblk->read_page);
1292	kfree(msblk->block_cache);
1293	kfree(msblk->fragment_index_2);
1294	vfree(msblk->stream.workspace);
1295	kfree(s->s_fs_info);
1296	s->s_fs_info = NULL;
1297	return -EINVAL;
1298
1299failure:
1300	return -ENOMEM;
1301}
1302
1303
1304static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1305{
1306	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
1307	struct squashfs_super_block *sblk = &msblk->sblk;
1308
1309	TRACE("Entered squashfs_statfs\n");
1310
1311	buf->f_type = SQUASHFS_MAGIC;
1312	buf->f_bsize = sblk->block_size;
1313	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
1314	buf->f_bfree = buf->f_bavail = 0;
1315	buf->f_files = sblk->inodes;
1316	buf->f_ffree = 0;
1317	buf->f_namelen = SQUASHFS_NAME_LEN;
1318
1319	return 0;
1320}
1321
1322
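/*
 * Read a page of a symbolic link target.  The target is stored as metadata
 * immediately after the symlink inode, so it is read through the metadata
 * cache rather than from data blocks.
 */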
1323static int squashfs_symlink_readpage(struct file *file, struct page *page)
1324{
1325	struct inode *inode = page->mapping->host;
1326	int index = page->index << PAGE_CACHE_SHIFT, length, bytes, avail_bytes;
1327	long long block = SQUASHFS_I(inode)->start_block;
1328	int offset = SQUASHFS_I(inode)->offset;
1329	void *pageaddr = kmap(page);
1330
1331	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
1332				"%llx, offset %x\n", page->index,
1333				SQUASHFS_I(inode)->start_block,
1334				SQUASHFS_I(inode)->offset);
1335
1336	for (length = 0; length < index; length += bytes) {
1337		bytes = squashfs_get_cached_block(inode->i_sb, NULL, block,
1338				offset, PAGE_CACHE_SIZE, &block, &offset);
1339		if (bytes == 0) {
1340			ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
1341			goto skip_read;
1342		}
1343	}
1344
1345	if (length != index) {
1346		ERROR("(squashfs_symlink_readpage) length != index\n");
1347		bytes = 0;
1348		goto skip_read;
1349	}
1350
1351	avail_bytes = min_t(int, i_size_read(inode) - length, PAGE_CACHE_SIZE);
1352
1353	bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block, offset,
1354		avail_bytes, &block, &offset);
1355	if (bytes == 0)
1356		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
1357
1358skip_read:
1359	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1360	kunmap(page);
1361	flush_dcache_page(page);
1362	SetPageUptodate(page);
1363	unlock_page(page);
1364
1365	return 0;
1366}
1367
1368
1369static struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
1370{
1371	struct meta_index *meta = NULL;
1372	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1373	int i;
1374
1375	mutex_lock(&msblk->meta_index_mutex);
1376
1377	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
1378
1379	if (msblk->meta_index == NULL)
1380		goto not_allocated;
1381
1382	for (i = 0; i < SQUASHFS_META_NUMBER; i ++) {
1383		if (msblk->meta_index[i].inode_number == inode->i_ino &&
1384				msblk->meta_index[i].offset >= offset &&
1385				msblk->meta_index[i].offset <= index &&
1386				msblk->meta_index[i].locked == 0) {
1387			TRACE("locate_meta_index: entry %d, offset %d\n", i,
1388					msblk->meta_index[i].offset);
1389			meta = &msblk->meta_index[i];
1390			offset = meta->offset;
1391		}
1392	}
1393
1394	if (meta)
1395		meta->locked = 1;
1396
1397not_allocated:
1398	mutex_unlock(&msblk->meta_index_mutex);
1399
1400	return meta;
1401}
1402
1403
1404static struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
1405{
1406	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1407	struct meta_index *meta = NULL;
1408	int i;
1409
1410	mutex_lock(&msblk->meta_index_mutex);
1411
1412	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
1413
1414	if (msblk->meta_index == NULL) {
1415		msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1416					SQUASHFS_META_NUMBER, GFP_KERNEL);
1417		if (msblk->meta_index == NULL) {
1418			ERROR("Failed to allocate meta_index\n");
1419			goto failed;
1420		}
1421		for (i = 0; i < SQUASHFS_META_NUMBER; i++) {
1422			msblk->meta_index[i].inode_number = 0;
1423			msblk->meta_index[i].locked = 0;
1424		}
1425		msblk->next_meta_index = 0;
1426	}
1427
1428	for (i = SQUASHFS_META_NUMBER; i &&
1429			msblk->meta_index[msblk->next_meta_index].locked; i --)
1430		msblk->next_meta_index = (msblk->next_meta_index + 1) %
1431			SQUASHFS_META_NUMBER;
1432
1433	if (i == 0) {
1434		TRACE("empty_meta_index: failed!\n");
1435		goto failed;
1436	}
1437
1438	TRACE("empty_meta_index: returned meta entry %d, %p\n",
1439			msblk->next_meta_index,
1440			&msblk->meta_index[msblk->next_meta_index]);
1441
1442	meta = &msblk->meta_index[msblk->next_meta_index];
1443	msblk->next_meta_index = (msblk->next_meta_index + 1) %
1444			SQUASHFS_META_NUMBER;
1445
1446	meta->inode_number = inode->i_ino;
1447	meta->offset = offset;
1448	meta->skip = skip;
1449	meta->entries = 0;
1450	meta->locked = 1;
1451
1452failed:
1453	mutex_unlock(&msblk->meta_index_mutex);
1454	return meta;
1455}
1456
1457
1458static void release_meta_index(struct inode *inode, struct meta_index *meta)
1459{
1460	meta->locked = 0;
1461	smp_mb();
1462}
1463
1464
1465static int read_block_index(struct super_block *s, int blocks, char *block_list,
1466				long long *start_block, int *offset)
1467{
1468	struct squashfs_sb_info *msblk = s->s_fs_info;
1469	unsigned int *block_listp;
1470	int block = 0;
1471
1472	if (msblk->swap) {
1473		char sblock_list[blocks << 2];
1474
1475		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
1476				*offset, blocks << 2, start_block, offset)) {
1477			ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset);
1478			goto failure;
1479		}
1480		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
1481				((unsigned int *)sblock_list), blocks);
1482	} else {
1483		if (!squashfs_get_cached_block(s, block_list, *start_block,
1484				*offset, blocks << 2, start_block, offset)) {
1485			ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset);
1486			goto failure;
1487		}
1488	}
1489
1490	for (block_listp = (unsigned int *) block_list; blocks;
1491				block_listp++, blocks --)
1492		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
1493
1494	return block;
1495
1496failure:
1497	return -1;
1498}
1499
1500
1501#define SIZE 256
1502
1503static inline int calculate_skip(int blocks) {
1504	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
1505	return skip >= 7 ? 7 : skip + 1;
1506}
1507
1508
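/*
 * Return the position in the inode's block list from which the lookup for
 * data block index should continue, caching intermediate positions in the
 * per-mount meta index so that random access into large files does not
 * rescan the whole block list each time.  The caller walks the remaining
 * entries itself, starting from the returned block count.
 */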
1509static int get_meta_index(struct inode *inode, int index,
1510		long long *index_block, int *index_offset,
1511		long long *data_block, char *block_list)
1512{
1513	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1514	struct squashfs_super_block *sblk = &msblk->sblk;
1515	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
1516	int offset = 0;
1517	struct meta_index *meta;
1518	struct meta_entry *meta_entry;
1519	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
1520	int cur_offset = SQUASHFS_I(inode)->offset;
1521	long long cur_data_block = SQUASHFS_I(inode)->start_block;
1522	int i;
1523
1524	index /= SQUASHFS_META_INDEXES * skip;
1525
1526	while (offset < index) {
1527		meta = locate_meta_index(inode, index, offset + 1);
1528
1529		if (meta == NULL) {
1530			meta = empty_meta_index(inode, offset + 1, skip);
1531			if (meta == NULL)
1532				goto all_done;
1533		} else {
1534			if(meta->entries == 0)
1535				goto failed;
			/* resume from the closest cached entry at or before the wanted index */
1537			offset = index < meta->offset + meta->entries ? index :
1538				meta->offset + meta->entries - 1;
			/* continue the block list walk from that cached position */
1540			meta_entry = &meta->meta_entry[offset - meta->offset];
1541			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
1542			cur_offset = meta_entry->offset;
1543			cur_data_block = meta_entry->data_block;
1544			TRACE("get_meta_index: offset %d, meta->offset %d, "
1545				"meta->entries %d\n", offset, meta->offset, meta->entries);
1546			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
1547				" data_block 0x%llx\n", cur_index_block,
1548				cur_offset, cur_data_block);
1549		}
1550
1551		for (i = meta->offset + meta->entries; i <= index &&
1552				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
1553			int blocks = skip * SQUASHFS_META_INDEXES;
1554
1555			while (blocks) {
1556				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) : blocks;
1557				int res = read_block_index(inode->i_sb, block, block_list,
1558					&cur_index_block, &cur_offset);
1559
1560				if (res == -1)
1561					goto failed;
1562
1563				cur_data_block += res;
1564				blocks -= block;
1565			}
1566
1567			meta_entry = &meta->meta_entry[i - meta->offset];
1568			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
1569			meta_entry->offset = cur_offset;
1570			meta_entry->data_block = cur_data_block;
1571			meta->entries ++;
1572			offset ++;
1573		}
1574
1575		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
1576				meta->offset, meta->entries);
1577
1578		release_meta_index(inode, meta);
1579	}
1580
1581all_done:
1582	*index_block = cur_index_block;
1583	*index_offset = cur_offset;
1584	*data_block = cur_data_block;
1585
1586	return offset * SQUASHFS_META_INDEXES * skip;
1587
1588failed:
1589	release_meta_index(inode, meta);
1590	return -1;
1591}
1592
1593
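/*
 * Return the on-disk start of data block index, and its compressed size in
 * *bsize, by walking the inode's block list (with the help of the meta
 * index for large files).  Returns 0 on failure.
 */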
1594static long long read_blocklist(struct inode *inode, int index,
1595				int readahead_blks, char *block_list,
1596				unsigned short **block_p, unsigned int *bsize)
1597{
1598	long long block_ptr;
1599	int offset;
1600	long long block;
1601	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
1602		block_list);
1603
1604	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
1605		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset, block);
1606
1607	if(res == -1)
1608		goto failure;
1609
1610	index -= res;
1611
1612	while (index) {
1613		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
1614		int res = read_block_index(inode->i_sb, blocks, block_list,
1615			&block_ptr, &offset);
1616		if (res == -1)
1617			goto failure;
1618		block += res;
1619		index -= blocks;
1620	}
1621
1622	if (read_block_index(inode->i_sb, 1, block_list, &block_ptr, &offset) == -1)
1623		goto failure;
1624	*bsize = *((unsigned int *) block_list);
1625
1626	return block;
1627
1628failure:
1629	return 0;
1630}
1631
1632
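/*
 * Read the datablock or tail-end fragment covering this page, and fill in
 * every page cache page covered by that block, zero-filling holes and any
 * area beyond the end of file.
 */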
1633static int squashfs_readpage(struct file *file, struct page *page)
1634{
1635	struct inode *inode = page->mapping->host;
1636	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1637	struct squashfs_super_block *sblk = &msblk->sblk;
1638	unsigned char *block_list = NULL;
1639	long long block;
1640	unsigned int bsize, i;
1641	int bytes;
1642	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
1643 	void *pageaddr;
1644	struct squashfs_fragment_cache *fragment = NULL;
1645	char *data_ptr = msblk->read_page;
1646
1647	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
1648	int start_index = page->index & ~mask;
1649	int end_index = start_index | mask;
1650	int file_end = i_size_read(inode) >> sblk->block_log;
1651	int sparse = 0;
1652
1653	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
1654					page->index, SQUASHFS_I(inode)->start_block);
1655
1656	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1657					PAGE_CACHE_SHIFT))
1658		goto out;
1659
1660	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1661					|| index < file_end) {
1662		block_list = kmalloc(SIZE, GFP_KERNEL);
1663		if (block_list == NULL) {
1664			ERROR("Failed to allocate block_list\n");
1665			goto error_out;
1666		}
1667
1668		block = (msblk->read_blocklist)(inode, index, 1, block_list, NULL, &bsize);
1669		if (block == 0)
1670			goto error_out;
1671
1672		if (bsize == 0) { /* hole */
1673			bytes = index == file_end ?
1674				(i_size_read(inode) & (sblk->block_size - 1)) : sblk->block_size;
1675			sparse = 1;
1676		} else {
1677			mutex_lock(&msblk->read_page_mutex);
1678
1679			bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1680				bsize, NULL, sblk->block_size);
1681
1682			if (bytes == 0) {
1683				ERROR("Unable to read page, block %llx, size %x\n", block, bsize);
1684				mutex_unlock(&msblk->read_page_mutex);
1685				goto error_out;
1686			}
1687		}
1688	} else {
1689		fragment = get_cached_fragment(inode->i_sb,
1690					SQUASHFS_I(inode)-> u.s1.fragment_start_block,
1691					SQUASHFS_I(inode)->u.s1.fragment_size);
1692
1693		if (fragment == NULL || fragment->error) {
1694			ERROR("Unable to read page, block %llx, size %x\n",
1695					SQUASHFS_I(inode)->u.s1.fragment_start_block,
1696					(int) SQUASHFS_I(inode)->u.s1.fragment_size);
1697			if (fragment)
1698				release_cached_fragment(msblk, fragment);
1699			goto error_out;
1700		}
1701		bytes = i_size_read(inode) & (sblk->block_size - 1);
1702		data_ptr = fragment->data + SQUASHFS_I(inode)->u.s1.fragment_offset;
1703	}
1704
1705	for (i = start_index; i <= end_index && bytes > 0; i++,
1706						bytes -= PAGE_CACHE_SIZE, data_ptr += PAGE_CACHE_SIZE) {
1707		struct page *push_page;
1708		int avail = sparse ? 0 : min_t(unsigned int, bytes, PAGE_CACHE_SIZE);
1709
1710		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
1711
1712		push_page = (i == page->index) ? page :
1713			grab_cache_page_nowait(page->mapping, i);
1714
1715		if (!push_page)
1716			continue;
1717
1718		if (PageUptodate(push_page))
1719			goto skip_page;
1720
1721 		pageaddr = kmap_atomic(push_page, KM_USER0);
1722		memcpy(pageaddr, data_ptr, avail);
1723		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
1724		kunmap_atomic(pageaddr, KM_USER0);
1725		flush_dcache_page(push_page);
1726		SetPageUptodate(push_page);
1727skip_page:
1728		unlock_page(push_page);
1729		if(i != page->index)
1730			page_cache_release(push_page);
1731	}
1732
1733	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1734					|| index < file_end) {
1735		if (!sparse)
1736			mutex_unlock(&msblk->read_page_mutex);
1737		kfree(block_list);
1738	} else
1739		release_cached_fragment(msblk, fragment);
1740
1741	return 0;
1742
1743error_out:
1744	SetPageError(page);
1745out:
1746	pageaddr = kmap_atomic(page, KM_USER0);
1747	memset(pageaddr, 0, PAGE_CACHE_SIZE);
1748	kunmap_atomic(pageaddr, KM_USER0);
1749	flush_dcache_page(page);
1750	if (!PageError(page))
1751		SetPageUptodate(page);
1752	unlock_page(page);
1753
1754	kfree(block_list);
1755	return 0;
1756}
1757
1758
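/*
 * Use the directory index (if present) to skip directly to the metadata
 * block containing the directory entry at f_pos, rather than scanning the
 * directory from the start.  Returns the file position of the block
 * reached, with *next_block/*next_offset updated to point at it.
 */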
1759static int get_dir_index_using_offset(struct super_block *s,
1760				long long *next_block, unsigned int *next_offset,
1761				long long index_start, unsigned int index_offset, int i_count,
1762				long long f_pos)
1763{
1764	struct squashfs_sb_info *msblk = s->s_fs_info;
1765	struct squashfs_super_block *sblk = &msblk->sblk;
1766	int i, length = 0;
1767	struct squashfs_dir_index index;
1768
1769	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
1770					i_count, (unsigned int) f_pos);
1771
	/* skip the synthetic "." and ".." entries occupying f_pos 0..2 */
	f_pos -= 3;
1773	if (f_pos == 0)
1774		goto finish;
1775
1776	for (i = 0; i < i_count; i++) {
1777		if (msblk->swap) {
1778			struct squashfs_dir_index sindex;
1779			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
1780					sizeof(sindex), &index_start, &index_offset);
1781			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
1782		} else
1783			squashfs_get_cached_block(s, &index, index_start, index_offset,
1784					sizeof(index), &index_start, &index_offset);
1785
1786		if (index.index > f_pos)
1787			break;
1788
1789		squashfs_get_cached_block(s, NULL, index_start, index_offset,
1790					index.size + 1, &index_start, &index_offset);
1791
1792		length = index.index;
1793		*next_block = index.start_block + sblk->directory_table_start;
1794	}
1795
1796	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
1797
1798finish:
1799	return length + 3;
1800}
1801
1802
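/*
 * As get_dir_index_using_offset(), but keyed by name: index entries whose
 * names sort at or before the target are skipped.  A single scratch
 * allocation holds a NUL-terminated copy of the target name followed by a
 * squashfs_dir_index with room for its own name.
 */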
1803static int get_dir_index_using_name(struct super_block *s,
1804				long long *next_block, unsigned int *next_offset,
1805				long long index_start, unsigned int index_offset, int i_count,
1806				const char *name, int size)
1807{
1808	struct squashfs_sb_info *msblk = s->s_fs_info;
1809	struct squashfs_super_block *sblk = &msblk->sblk;
1810	int i, length = 0;
1811	struct squashfs_dir_index *index;
1812	char *str;
1813
1814	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
1815
1816	str = kmalloc(sizeof(struct squashfs_dir_index) +
1817		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL);
1818	if (str == NULL) {
1819		ERROR("Failed to allocate squashfs_dir_index\n");
1820		goto failure;
1821	}
1822
1823	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
1824	strncpy(str, name, size);
1825	str[size] = '\0';
1826
1827	for (i = 0; i < i_count; i++) {
1828		if (msblk->swap) {
1829			struct squashfs_dir_index sindex;
1830			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
1831				sizeof(sindex), &index_start, &index_offset);
1832			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
1833		} else
1834			squashfs_get_cached_block(s, index, index_start, index_offset,
1835				sizeof(struct squashfs_dir_index), &index_start, &index_offset);
1836
1837		squashfs_get_cached_block(s, index->name, index_start, index_offset,
1838					index->size + 1, &index_start, &index_offset);
1839
1840		index->name[index->size + 1] = '\0';
1841
1842		if (strcmp(index->name, str) > 0)
1843			break;
1844
1845		length = index->index;
1846		*next_block = index->start_block + sblk->directory_table_start;
1847	}
1848
1849	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
1850	kfree(str);
1851
1852failure:
1853	return length + 3;
1854}
1855
1856
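/*
 * Read directory entries.  "." and ".." are not stored in the directory
 * table, so they are synthesised first (f_pos 0-2); on-disk entries start
 * at f_pos 3.  Each metadata run consists of a squashfs_dir_header
 * followed by dirh.count + 1 packed squashfs_dir_entry structures, which
 * are byte-swapped when msblk->swap indicates an opposite-endian
 * filesystem.
 */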
1857static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
1858{
1859	struct inode *i = file->f_dentry->d_inode;
1860	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1861	struct squashfs_super_block *sblk = &msblk->sblk;
1862	long long next_block = SQUASHFS_I(i)->start_block +
1863		sblk->directory_table_start;
1864	int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count;
1865	struct squashfs_dir_header dirh;
1866	struct squashfs_dir_entry *dire;
1867
1868	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
1869
1870	dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1871		SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
1872	if (dire == NULL) {
1873		ERROR("Failed to allocate squashfs_dir_entry\n");
1874		goto finish;
1875	}
1876
1877	while (file->f_pos < 3) {
1878		char *name;
1879		int size, i_ino;
1880
1881		if (file->f_pos == 0) {
1882			name = ".";
1883			size = 1;
1884			i_ino = i->i_ino;
1885		} else {
1886			name = "..";
1887			size = 2;
1888			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
1889		}
1890		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
1891				(unsigned int) dirent, name, size, (int)
1892				file->f_pos, i_ino, squashfs_filetype_table[1]);
1893
1894		if (filldir(dirent, name, size, file->f_pos, i_ino,
1895				squashfs_filetype_table[1]) < 0) {
1896			TRACE("Filldir returned less than 0\n");
1897			goto finish;
1898		}
1899		file->f_pos += size;
1900	}
1901
1902	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
1903				SQUASHFS_I(i)->u.s2.directory_index_start,
1904				SQUASHFS_I(i)->u.s2.directory_index_offset,
1905				SQUASHFS_I(i)->u.s2.directory_index_count, file->f_pos);
1906
1907	while (length < i_size_read(i)) {
1908		/* read directory header */
1909		if (msblk->swap) {
1910			struct squashfs_dir_header sdirh;
1911
1912			if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block,
1913					 next_offset, sizeof(sdirh), &next_block, &next_offset))
1914				goto failed_read;
1915
1916			length += sizeof(sdirh);
1917			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
1918		} else {
1919			if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block,
1920					next_offset, sizeof(dirh), &next_block, &next_offset))
1921				goto failed_read;
1922
1923			length += sizeof(dirh);
1924		}
1925
1926		dir_count = dirh.count + 1;
1927		while (dir_count--) {
1928			if (msblk->swap) {
1929				struct squashfs_dir_entry sdire;
1930				if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block,
1931						next_offset, sizeof(sdire), &next_block, &next_offset))
1932					goto failed_read;
1933
1934				length += sizeof(sdire);
1935				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
1936			} else {
1937				if (!squashfs_get_cached_block(i->i_sb, dire, next_block,
1938						next_offset, sizeof(*dire), &next_block, &next_offset))
1939					goto failed_read;
1940
1941				length += sizeof(*dire);
1942			}
1943
1944			if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block,
1945						next_offset, dire->size + 1, &next_block, &next_offset))
1946				goto failed_read;
1947
1948			length += dire->size + 1;
1949
1950			if (file->f_pos >= length)
1951				continue;
1952
1953			dire->name[dire->size + 1] = '\0';
1954
1955			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
1956					(unsigned int) dirent, dire->name, dire->size + 1,
1957					(int) file->f_pos, dirh.start_block, dire->offset,
1958					dirh.inode_number + dire->inode_number,
1959					squashfs_filetype_table[dire->type]);
1960
1961			if (filldir(dirent, dire->name, dire->size + 1, file->f_pos,
1962					dirh.inode_number + dire->inode_number,
1963					squashfs_filetype_table[dire->type]) < 0) {
1964				TRACE("Filldir returned less than 0\n");
1965				goto finish;
1966			}
1967			file->f_pos = length;
1968		}
1969	}
1970
1971finish:
1972	kfree(dire);
1973	return 0;
1974
1975failed_read:
1976	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
1977		next_offset);
1978	kfree(dire);
1979	return 0;
1980}
1981
1982
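/*
 * Look up a name in a directory.  The directory index is used to skip to
 * the right metadata block, then entries are scanned linearly; because
 * they are stored in sorted order, the scan stops as soon as the first
 * character of an entry sorts after the first character of the name.
 */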
1983static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
1984				struct nameidata *nd)
1985{
1986	const unsigned char *name = dentry->d_name.name;
1987	int len = dentry->d_name.len;
1988	struct inode *inode = NULL;
1989	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1990	struct squashfs_super_block *sblk = &msblk->sblk;
1991	long long next_block = SQUASHFS_I(i)->start_block +
1992				sblk->directory_table_start;
1993	int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count;
1994	struct squashfs_dir_header dirh;
1995	struct squashfs_dir_entry *dire;
1996
1997	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
1998
1999	dire = kmalloc(sizeof(struct squashfs_dir_entry) +
2000		SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
2001	if (dire == NULL) {
2002		ERROR("Failed to allocate squashfs_dir_entry\n");
2003		goto exit_lookup;
2004	}
2005
2006	if (len > SQUASHFS_NAME_LEN)
2007		goto exit_lookup;
2008
2009	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
2010				SQUASHFS_I(i)->u.s2.directory_index_start,
2011				SQUASHFS_I(i)->u.s2.directory_index_offset,
2012				SQUASHFS_I(i)->u.s2.directory_index_count, name, len);
2013
2014	while (length < i_size_read(i)) {
2015		/* read directory header */
2016		if (msblk->swap) {
2017			struct squashfs_dir_header sdirh;
2018			if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block,
2019					 next_offset, sizeof(sdirh), &next_block, &next_offset))
2020				goto failed_read;
2021
2022			length += sizeof(sdirh);
2023			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
2024		} else {
2025			if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block,
2026					next_offset, sizeof(dirh), &next_block, &next_offset))
2027				goto failed_read;
2028
2029			length += sizeof(dirh);
2030		}
2031
2032		dir_count = dirh.count + 1;
2033		while (dir_count--) {
2034			if (msblk->swap) {
2035				struct squashfs_dir_entry sdire;
2036				if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block,
2037						next_offset, sizeof(sdire), &next_block, &next_offset))
2038					goto failed_read;
2039
2040				length += sizeof(sdire);
2041				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2042			} else {
2043				if (!squashfs_get_cached_block(i->i_sb, dire, next_block,
2044						next_offset, sizeof(*dire), &next_block, &next_offset))
2045					goto failed_read;
2046
2047				length += sizeof(*dire);
2048			}
2049
2050			if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block,
2051					next_offset, dire->size + 1, &next_block, &next_offset))
2052				goto failed_read;
2053
2054			length += dire->size + 1;
2055
2056			if (name[0] < dire->name[0])
2057				goto exit_lookup;
2058
2059			if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
2060				squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
2061								dire->offset);
2062
2063				TRACE("calling squashfs_iget for directory entry %s, inode"
2064					"  %x:%x, %d\n", name, dirh.start_block, dire->offset,
2065					dirh.inode_number + dire->inode_number);
2066
2067				inode = squashfs_iget(i->i_sb, ino, dirh.inode_number + dire->inode_number);
2068
2069				goto exit_lookup;
2070			}
2071		}
2072	}
2073
2074exit_lookup:
2075	kfree(dire);
2076	if (inode)
2077		return d_splice_alias(inode, dentry);
2078	d_add(dentry, inode);
2079	return ERR_PTR(0);
2080
2081failed_read:
2082	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
2083		next_offset);
2084	goto exit_lookup;
2085}
2086
2087
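/* Squashfs is read-only, so a remount can only ever stay read-only. */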
2088static int squashfs_remount(struct super_block *s, int *flags, char *data)
2089{
2090	*flags |= MS_RDONLY;
2091	return 0;
2092}
2093
2094
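/*
 * Free everything allocated at mount time: the metadata block cache, the
 * fragment cache, the read_page buffer, the uid and fragment index tables,
 * any meta index and the zlib workspace.
 */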
2095static void squashfs_put_super(struct super_block *s)
2096{
2097	int i;
2098
2099	if (s->s_fs_info) {
2100		struct squashfs_sb_info *sbi = s->s_fs_info;
2101		if (sbi->block_cache)
2102			for (i = 0; i < squashfs_cached_blks; i++)
2103				if (sbi->block_cache[i].block != SQUASHFS_INVALID_BLK)
2104					vfree(sbi->block_cache[i].data);
2105		if (sbi->fragment)
2106			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
2107				vfree(sbi->fragment[i].data);
2108		kfree(sbi->fragment);
2109		kfree(sbi->block_cache);
2110		vfree(sbi->read_page);
2111		kfree(sbi->uid);
2112		kfree(sbi->fragment_index);
2113		kfree(sbi->fragment_index_2);
2114		kfree(sbi->meta_index);
2115		vfree(sbi->stream.workspace);
2116		kfree(s->s_fs_info);
2117		s->s_fs_info = NULL;
2118	}
2119}
2120
2121
2122static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
2123				const char *dev_name, void *data, struct vfsmount *mnt)
2124{
2125	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
2126				mnt);
2127}
2128
2129
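/*
 * Module init/exit: create the inode slab cache and register the
 * filesystem; both are torn down again on failure or on module unload.
 */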
2130static int __init init_squashfs_fs(void)
2131{
2132	int err = init_inodecache();
2133	if (err)
2134		goto out;
2135
2136	printk(KERN_INFO "squashfs: version 3.3-CVS (2008/05/11) "
2137		"Phillip Lougher\n");
2138
2139	err = register_filesystem(&squashfs_fs_type);
2140	if (err)
2141		destroy_inodecache();
2142
2143out:
2144	return err;
2145}
2146
2147
2148static void __exit exit_squashfs_fs(void)
2149{
2150	unregister_filesystem(&squashfs_fs_type);
2151	destroy_inodecache();
2152}
2153
2154
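/*
 * Squashfs inodes are allocated from a dedicated slab cache; init_once()
 * initialises the embedded VFS inode when each slab object is first
 * constructed.
 */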
2155static struct kmem_cache *squashfs_inode_cachep;
2156
2157
2158static struct inode *squashfs_alloc_inode(struct super_block *sb)
2159{
2160	struct squashfs_inode_info *ei;
2161	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
2162	return ei ? &ei->vfs_inode : NULL;
2163}
2164
2165
2166static void squashfs_destroy_inode(struct inode *inode)
2167{
2168	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
2169}
2170
2171
2172static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
2173{
2174	struct squashfs_inode_info *ei = foo;
2175
2176	inode_init_once(&ei->vfs_inode);
2177}
2178
2179
2180static int __init init_inodecache(void)
2181{
2182	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
2183		sizeof(struct squashfs_inode_info), 0,
2184		SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once, NULL);
2185	if (squashfs_inode_cachep == NULL)
2186		return -ENOMEM;
2187	return 0;
2188}
2189
2190
2191static void destroy_inodecache(void)
2192{
2193	kmem_cache_destroy(squashfs_inode_cachep);
2194}
2195
2196
2197module_init(init_squashfs_fs);
2198module_exit(exit_squashfs_fs);
2199MODULE_DESCRIPTION("squashfs 3.3-CVS, a compressed read-only filesystem");
2200MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>");
2201MODULE_LICENSE("GPL");
2202