inode.c revision 2051629df414de14867c1465025e8ec2453badc2
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
 * Phillip Lougher <phillip@lougher.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * inode.c
 */

#include <linux/squashfs_fs.h>
#include <linux/module.h>
#include <linux/zlib.h>
#include <linux/fs.h>
#include <linux/squashfs_fs_sb.h>
#include <linux/squashfs_fs_i.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>

#include "squashfs.h"

int squashfs_cached_blks;

static void vfs_read_inode(struct inode *i);
static struct dentry *squashfs_get_parent(struct dentry *child);
static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
static int squashfs_statfs(struct dentry *, struct kstatfs *);
static int squashfs_symlink_readpage(struct file *file, struct page *page);
static long long read_blocklist(struct inode *inode, int index,
				int readahead_blks, char *block_list,
				unsigned short **block_p, unsigned int *bsize);
static int squashfs_readpage(struct file *file, struct page *page);
static int squashfs_readdir(struct file *, void *, filldir_t);
static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
				struct nameidata *);
static int squashfs_remount(struct super_block *s, int *flags, char *data);
static void squashfs_put_super(struct super_block *);
static int squashfs_get_sb(struct file_system_type *, int, const char *, void *,
				struct vfsmount *);
static struct inode *squashfs_alloc_inode(struct super_block *sb);
static void squashfs_destroy_inode(struct inode *inode);
static int init_inodecache(void);
static void destroy_inodecache(void);

static struct file_system_type squashfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "squashfs",
	.get_sb = squashfs_get_sb,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV
};

static const unsigned char squashfs_filetype_table[] = {
	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
};

static struct super_operations squashfs_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.destroy_inode = squashfs_destroy_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
	.remount_fs = squashfs_remount
};

static struct super_operations squashfs_export_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.destroy_inode = squashfs_destroy_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
	.read_inode = vfs_read_inode
};

static struct export_operations squashfs_export_ops = {
	.get_parent = squashfs_get_parent
};

SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
	.readpage = squashfs_symlink_readpage
};

SQSH_EXTERN const struct
address_space_operations squashfs_aops = { 97 .readpage = squashfs_readpage 98}; 99 100static const struct file_operations squashfs_dir_ops = { 101 .read = generic_read_dir, 102 .readdir = squashfs_readdir 103}; 104 105SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = { 106 .lookup = squashfs_lookup 107}; 108 109 110static struct buffer_head *get_block_length(struct super_block *s, 111 int *cur_index, int *offset, int *c_byte) 112{ 113 struct squashfs_sb_info *msblk = s->s_fs_info; 114 unsigned short temp; 115 struct buffer_head *bh; 116 117 if (!(bh = sb_bread(s, *cur_index))) 118 goto out; 119 120 if (msblk->devblksize - *offset == 1) { 121 if (msblk->swap) 122 ((unsigned char *) &temp)[1] = *((unsigned char *) 123 (bh->b_data + *offset)); 124 else 125 ((unsigned char *) &temp)[0] = *((unsigned char *) 126 (bh->b_data + *offset)); 127 brelse(bh); 128 if (!(bh = sb_bread(s, ++(*cur_index)))) 129 goto out; 130 if (msblk->swap) 131 ((unsigned char *) &temp)[0] = *((unsigned char *) 132 bh->b_data); 133 else 134 ((unsigned char *) &temp)[1] = *((unsigned char *) 135 bh->b_data); 136 *c_byte = temp; 137 *offset = 1; 138 } else { 139 if (msblk->swap) { 140 ((unsigned char *) &temp)[1] = *((unsigned char *) 141 (bh->b_data + *offset)); 142 ((unsigned char *) &temp)[0] = *((unsigned char *) 143 (bh->b_data + *offset + 1)); 144 } else { 145 ((unsigned char *) &temp)[0] = *((unsigned char *) 146 (bh->b_data + *offset)); 147 ((unsigned char *) &temp)[1] = *((unsigned char *) 148 (bh->b_data + *offset + 1)); 149 } 150 *c_byte = temp; 151 *offset += 2; 152 } 153 154 if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) { 155 if (*offset == msblk->devblksize) { 156 brelse(bh); 157 if (!(bh = sb_bread(s, ++(*cur_index)))) 158 goto out; 159 *offset = 0; 160 } 161 if (*((unsigned char *) (bh->b_data + *offset)) != 162 SQUASHFS_MARKER_BYTE) { 163 ERROR("Metadata block marker corrupt @ %x\n", 164 *cur_index); 165 brelse(bh); 166 goto out; 167 } 168 (*offset)++; 169 } 170 return bh; 171 172out: 173 return NULL; 174} 175 176 177SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer, 178 long long index, unsigned int length, 179 long long *next_index, int srclength) 180{ 181 struct squashfs_sb_info *msblk = s->s_fs_info; 182 struct squashfs_super_block *sblk = &msblk->sblk; 183 struct buffer_head **bh; 184 unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1); 185 unsigned int cur_index = index >> msblk->devblksize_log2; 186 int bytes, avail_bytes, b = 0, k = 0; 187 unsigned int compressed; 188 unsigned int c_byte = length; 189 190 bh = kmalloc(((sblk->block_size >> msblk->devblksize_log2) + 1) * 191 sizeof(struct buffer_head *), GFP_KERNEL); 192 if (bh == NULL) 193 goto read_failure; 194 195 if (c_byte) { 196 bytes = msblk->devblksize - offset; 197 compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte); 198 c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte); 199 200 TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, 201 compressed ? 
"" : "un", (unsigned int) c_byte, srclength); 202 203 if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used) 204 goto read_failure; 205 206 bh[0] = sb_getblk(s, cur_index); 207 if (bh[0] == NULL) 208 goto block_release; 209 210 for (b = 1; bytes < c_byte; b++) { 211 bh[b] = sb_getblk(s, ++cur_index); 212 if (bh[b] == NULL) 213 goto block_release; 214 bytes += msblk->devblksize; 215 } 216 ll_rw_block(READ, b, bh); 217 } else { 218 if (index < 0 || (index + 2) > sblk->bytes_used) 219 goto read_failure; 220 221 bh[0] = get_block_length(s, &cur_index, &offset, &c_byte); 222 if (bh[0] == NULL) 223 goto read_failure; 224 225 bytes = msblk->devblksize - offset; 226 compressed = SQUASHFS_COMPRESSED(c_byte); 227 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte); 228 229 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed 230 ? "" : "un", (unsigned int) c_byte); 231 232 if (c_byte > srclength || (index + c_byte) > sblk->bytes_used) 233 goto read_failure; 234 235 for (b = 1; bytes < c_byte; b++) { 236 bh[b] = sb_getblk(s, ++cur_index); 237 if (bh[b] == NULL) 238 goto block_release; 239 bytes += msblk->devblksize; 240 } 241 ll_rw_block(READ, b - 1, bh + 1); 242 } 243 244 if (compressed) { 245 int zlib_err = 0; 246 247 /* 248 * uncompress block 249 */ 250 251 mutex_lock(&msblk->read_data_mutex); 252 253 msblk->stream.next_out = buffer; 254 msblk->stream.avail_out = srclength; 255 256 for (bytes = 0; k < b; k++) { 257 avail_bytes = min(c_byte - bytes, msblk->devblksize - offset); 258 259 wait_on_buffer(bh[k]); 260 if (!buffer_uptodate(bh[k])) 261 goto release_mutex; 262 263 msblk->stream.next_in = bh[k]->b_data + offset; 264 msblk->stream.avail_in = avail_bytes; 265 266 if (k == 0) { 267 zlib_err = zlib_inflateInit(&msblk->stream); 268 if (zlib_err != Z_OK) { 269 ERROR("zlib_inflateInit returned unexpected result 0x%x," 270 " srclength %d\n", zlib_err, srclength); 271 goto release_mutex; 272 } 273 274 if (avail_bytes == 0) { 275 offset = 0; 276 brelse(bh[k]); 277 continue; 278 } 279 } 280 281 zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH); 282 if (zlib_err != Z_OK && zlib_err != Z_STREAM_END) { 283 ERROR("zlib_inflate returned unexpected result 0x%x," 284 " srclength %d, avail_in %d, avail_out %d\n", zlib_err, 285 srclength, msblk->stream.avail_in, msblk->stream.avail_out); 286 goto release_mutex; 287 } 288 289 bytes += avail_bytes; 290 offset = 0; 291 brelse(bh[k]); 292 } 293 294 if (zlib_err != Z_STREAM_END) 295 goto release_mutex; 296 297 zlib_err = zlib_inflateEnd(&msblk->stream); 298 if (zlib_err != Z_OK) { 299 ERROR("zlib_inflateEnd returned unexpected result 0x%x," 300 " srclength %d\n", zlib_err, srclength); 301 goto release_mutex; 302 } 303 bytes = msblk->stream.total_out; 304 mutex_unlock(&msblk->read_data_mutex); 305 } else { 306 int i; 307 308 for(i = 0; i < b; i++) { 309 wait_on_buffer(bh[i]); 310 if (!buffer_uptodate(bh[i])) 311 goto block_release; 312 } 313 314 for (bytes = 0; k < b; k++) { 315 avail_bytes = min(c_byte - bytes, msblk->devblksize - offset); 316 317 memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes); 318 bytes += avail_bytes; 319 offset = 0; 320 brelse(bh[k]); 321 } 322 } 323 324 if (next_index) 325 *next_index = index + c_byte + (length ? 0 : 326 (SQUASHFS_CHECK_DATA(msblk->sblk.flags) ? 
3 : 2)); 327 328 kfree(bh); 329 return bytes; 330 331release_mutex: 332 mutex_unlock(&msblk->read_data_mutex); 333 334block_release: 335 for (; k < b; k++) 336 brelse(bh[k]); 337 338read_failure: 339 ERROR("sb_bread failed reading block 0x%x\n", cur_index); 340 kfree(bh); 341 return 0; 342} 343 344 345SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, void *buffer, 346 long long block, unsigned int offset, 347 int length, long long *next_block, 348 unsigned int *next_offset) 349{ 350 struct squashfs_sb_info *msblk = s->s_fs_info; 351 int n, i, bytes, return_length = length; 352 long long next_index; 353 354 TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset); 355 356 while (1) { 357 for (i = 0; i < squashfs_cached_blks; i++) 358 if (msblk->block_cache[i].block == block) 359 break; 360 361 mutex_lock(&msblk->block_cache_mutex); 362 363 if (i == squashfs_cached_blks) { 364 /* read inode header block */ 365 if (msblk->unused_cache_blks == 0) { 366 mutex_unlock(&msblk->block_cache_mutex); 367 wait_event(msblk->waitq, msblk->unused_cache_blks); 368 continue; 369 } 370 371 i = msblk->next_cache; 372 for (n = 0; n < squashfs_cached_blks; n++) { 373 if (msblk->block_cache[i].block != SQUASHFS_USED_BLK) 374 break; 375 i = (i + 1) % squashfs_cached_blks; 376 } 377 378 msblk->next_cache = (i + 1) % squashfs_cached_blks; 379 380 if (msblk->block_cache[i].block == SQUASHFS_INVALID_BLK) { 381 msblk->block_cache[i].data = vmalloc(SQUASHFS_METADATA_SIZE); 382 if (msblk->block_cache[i].data == NULL) { 383 ERROR("Failed to allocate cache block\n"); 384 mutex_unlock(&msblk->block_cache_mutex); 385 goto out; 386 } 387 } 388 389 msblk->block_cache[i].block = SQUASHFS_USED_BLK; 390 msblk->unused_cache_blks --; 391 mutex_unlock(&msblk->block_cache_mutex); 392 393 msblk->block_cache[i].length = squashfs_read_data(s, 394 msblk->block_cache[i].data, block, 0, &next_index, 395 SQUASHFS_METADATA_SIZE); 396 397 if (msblk->block_cache[i].length == 0) { 398 ERROR("Unable to read cache block [%llx:%x]\n", block, offset); 399 mutex_lock(&msblk->block_cache_mutex); 400 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK; 401 msblk->unused_cache_blks ++; 402 vfree(msblk->block_cache[i].data); 403 wake_up(&msblk->waitq); 404 mutex_unlock(&msblk->block_cache_mutex); 405 goto out; 406 } 407 408 mutex_lock(&msblk->block_cache_mutex); 409 msblk->block_cache[i].block = block; 410 msblk->block_cache[i].next_index = next_index; 411 msblk->unused_cache_blks ++; 412 wake_up(&msblk->waitq); 413 TRACE("Read cache block [%llx:%x]\n", block, offset); 414 } 415 416 if (msblk->block_cache[i].block != block) { 417 mutex_unlock(&msblk->block_cache_mutex); 418 continue; 419 } 420 421 bytes = msblk->block_cache[i].length - offset; 422 423 if (bytes < 1) { 424 mutex_unlock(&msblk->block_cache_mutex); 425 goto out; 426 } else if (bytes >= length) { 427 if (buffer) 428 memcpy(buffer, msblk->block_cache[i].data + offset, length); 429 if (msblk->block_cache[i].length - offset == length) { 430 *next_block = msblk->block_cache[i].next_index; 431 *next_offset = 0; 432 } else { 433 *next_block = block; 434 *next_offset = offset + length; 435 } 436 mutex_unlock(&msblk->block_cache_mutex); 437 goto finish; 438 } else { 439 if (buffer) { 440 memcpy(buffer, msblk->block_cache[i].data + offset, bytes); 441 buffer = (char *) buffer + bytes; 442 } 443 block = msblk->block_cache[i].next_index; 444 mutex_unlock(&msblk->block_cache_mutex); 445 length -= bytes; 446 offset = 0; 447 } 448 } 449 450finish: 451 return return_length; 
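/*
 * Failure path below ("out"): squashfs_get_cached_block() returns 0 on any
 * error, while the successful path above returns the full requested length.
 */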
452out: 453 return 0; 454} 455 456static int get_fragment_location(struct super_block *s, unsigned int fragment, 457 long long *fragment_start_block, 458 unsigned int *fragment_size) 459{ 460 struct squashfs_sb_info *msblk = s->s_fs_info; 461 long long start_block = 462 msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)]; 463 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); 464 struct squashfs_fragment_entry fragment_entry; 465 466 if (msblk->swap) { 467 struct squashfs_fragment_entry sfragment_entry; 468 469 if (!squashfs_get_cached_block(s, &sfragment_entry, start_block, offset, 470 sizeof(sfragment_entry), &start_block, &offset)) 471 goto out; 472 SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry); 473 } else 474 if (!squashfs_get_cached_block(s, &fragment_entry, start_block, offset, sizeof(fragment_entry), &start_block, &offset)) 475 goto out; 476 477 *fragment_start_block = fragment_entry.start_block; 478 *fragment_size = fragment_entry.size; 479 480 return 1; 481 482out: 483 return 0; 484} 485 486 487SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, 488 struct squashfs_fragment_cache *fragment) 489{ 490 mutex_lock(&msblk->fragment_mutex); 491 fragment->locked --; 492 if (fragment->locked == 0) { 493 msblk->unused_frag_blks ++; 494 wake_up(&msblk->fragment_wait_queue); 495 } 496 mutex_unlock(&msblk->fragment_mutex); 497} 498 499 500SQSH_EXTERN 501struct squashfs_fragment_cache *get_cached_fragment(struct super_block *s, 502 long long start_block, int length) 503{ 504 int i, n; 505 struct squashfs_sb_info *msblk = s->s_fs_info; 506 struct squashfs_super_block *sblk = &msblk->sblk; 507 508 while (1) { 509 mutex_lock(&msblk->fragment_mutex); 510 511 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS && 512 msblk->fragment[i].block != start_block; i++); 513 514 if (i == SQUASHFS_CACHED_FRAGMENTS) { 515 if (msblk->unused_frag_blks == 0) { 516 mutex_unlock(&msblk->fragment_mutex); 517 wait_event(msblk->fragment_wait_queue, msblk->unused_frag_blks); 518 continue; 519 } 520 521 i = msblk->next_fragment; 522 for (n = 0; n < SQUASHFS_CACHED_FRAGMENTS; n++) { 523 if (msblk->fragment[i].locked == 0) 524 break; 525 i = (i + 1) % SQUASHFS_CACHED_FRAGMENTS; 526 } 527 528 msblk->next_fragment = (msblk->next_fragment + 1) % 529 SQUASHFS_CACHED_FRAGMENTS; 530 531 if (msblk->fragment[i].data == NULL) { 532 msblk->fragment[i].data = vmalloc(sblk->block_size); 533 if (msblk->fragment[i].data == NULL) { 534 ERROR("Failed to allocate fragment cache block\n"); 535 mutex_unlock(&msblk->fragment_mutex); 536 goto out; 537 } 538 } 539 540 msblk->unused_frag_blks --; 541 msblk->fragment[i].block = SQUASHFS_INVALID_BLK; 542 msblk->fragment[i].locked = 1; 543 mutex_unlock(&msblk->fragment_mutex); 544 545 msblk->fragment[i].length = squashfs_read_data(s, 546 msblk->fragment[i].data, start_block, length, NULL, 547 sblk->block_size); 548 549 if (msblk->fragment[i].length == 0) { 550 ERROR("Unable to read fragment cache block [%llx]\n", start_block); 551 msblk->fragment[i].locked = 0; 552 msblk->unused_frag_blks ++; 553 wake_up(&msblk->fragment_wait_queue); 554 goto out; 555 } 556 557 mutex_lock(&msblk->fragment_mutex); 558 msblk->fragment[i].block = start_block; 559 TRACE("New fragment %d, start block %lld, locked %d\n", 560 i, msblk->fragment[i].block, msblk->fragment[i].locked); 561 mutex_unlock(&msblk->fragment_mutex); 562 break; 563 } 564 565 if (msblk->fragment[i].locked == 0) 566 msblk->unused_frag_blks --; 567 msblk->fragment[i].locked++; 568 
mutex_unlock(&msblk->fragment_mutex); 569 TRACE("Got fragment %d, start block %lld, locked %d\n", i, 570 msblk->fragment[i].block, msblk->fragment[i].locked); 571 break; 572 } 573 574 return &msblk->fragment[i]; 575 576out: 577 return NULL; 578} 579 580 581static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i, 582 struct squashfs_base_inode_header *inodeb) 583{ 584 i->i_ino = inodeb->inode_number; 585 i->i_mtime.tv_sec = inodeb->mtime; 586 i->i_atime.tv_sec = inodeb->mtime; 587 i->i_ctime.tv_sec = inodeb->mtime; 588 i->i_uid = msblk->uid[inodeb->uid]; 589 i->i_mode = inodeb->mode; 590 i->i_size = 0; 591 592 if (inodeb->guid == SQUASHFS_GUIDS) 593 i->i_gid = i->i_uid; 594 else 595 i->i_gid = msblk->guid[inodeb->guid]; 596} 597 598 599static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino) 600{ 601 struct squashfs_sb_info *msblk = s->s_fs_info; 602 long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)]; 603 int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1); 604 squashfs_inode_t inode; 605 606 TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino); 607 608 if (msblk->swap) { 609 squashfs_inode_t sinode; 610 611 if (!squashfs_get_cached_block(s, &sinode, start, offset, 612 sizeof(sinode), &start, &offset)) 613 goto out; 614 SQUASHFS_SWAP_INODE_T((&inode), &sinode); 615 } else if (!squashfs_get_cached_block(s, &inode, start, offset, 616 sizeof(inode), &start, &offset)) 617 goto out; 618 619 TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode); 620 621 return inode; 622 623out: 624 return SQUASHFS_INVALID_BLK; 625} 626 627 628static void vfs_read_inode(struct inode *i) 629{ 630 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info; 631 squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino); 632 633 TRACE("Entered vfs_read_inode\n"); 634 635 if(inode != SQUASHFS_INVALID_BLK) 636 (msblk->read_inode)(i, inode); 637} 638 639 640static struct dentry *squashfs_get_parent(struct dentry *child) 641{ 642 struct inode *i = child->d_inode; 643 struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode); 644 struct dentry *rv; 645 646 TRACE("Entered squashfs_get_parent\n"); 647 648 if(parent == NULL) { 649 rv = ERR_PTR(-EACCES); 650 goto out; 651 } 652 653 rv = d_alloc_anon(parent); 654 if(rv == NULL) 655 rv = ERR_PTR(-ENOMEM); 656 657out: 658 return rv; 659} 660 661 662SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, 663 squashfs_inode_t inode, unsigned int inode_number) 664{ 665 struct squashfs_sb_info *msblk = s->s_fs_info; 666 struct inode *i = iget_locked(s, inode_number); 667 668 TRACE("Entered squashfs_iget\n"); 669 670 if(i && (i->i_state & I_NEW)) { 671 (msblk->read_inode)(i, inode); 672 unlock_new_inode(i); 673 } 674 675 return i; 676} 677 678 679static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode) 680{ 681 struct super_block *s = i->i_sb; 682 struct squashfs_sb_info *msblk = s->s_fs_info; 683 struct squashfs_super_block *sblk = &msblk->sblk; 684 long long block = SQUASHFS_INODE_BLK(inode) + sblk->inode_table_start; 685 unsigned int offset = SQUASHFS_INODE_OFFSET(inode); 686 long long next_block; 687 unsigned int next_offset; 688 union squashfs_inode_header id, sid; 689 struct squashfs_base_inode_header *inodeb = &id.base, *sinodeb = &sid.base; 690 691 TRACE("Entered squashfs_read_inode\n"); 692 693 if (msblk->swap) { 694 if (!squashfs_get_cached_block(s, sinodeb, block, offset, 695 sizeof(*sinodeb), &next_block, &next_offset)) 696 goto failed_read; 697 
SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb, sizeof(*sinodeb)); 698 } else 699 if (!squashfs_get_cached_block(s, inodeb, block, offset, 700 sizeof(*inodeb), &next_block, &next_offset)) 701 goto failed_read; 702 703 squashfs_new_inode(msblk, i, inodeb); 704 705 switch(inodeb->inode_type) { 706 case SQUASHFS_FILE_TYPE: { 707 unsigned int frag_size; 708 long long frag_blk; 709 struct squashfs_reg_inode_header *inodep = &id.reg; 710 struct squashfs_reg_inode_header *sinodep = &sid.reg; 711 712 if (msblk->swap) { 713 if (!squashfs_get_cached_block(s, sinodep, block, offset, 714 sizeof(*sinodep), &next_block, &next_offset)) 715 goto failed_read; 716 SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep); 717 } else 718 if (!squashfs_get_cached_block(s, inodep, block, offset, 719 sizeof(*inodep), &next_block, &next_offset)) 720 goto failed_read; 721 722 frag_blk = SQUASHFS_INVALID_BLK; 723 724 if (inodep->fragment != SQUASHFS_INVALID_FRAG) 725 if(!get_fragment_location(s, inodep->fragment, &frag_blk, 726 &frag_size)) 727 goto failed_read; 728 729 i->i_nlink = 1; 730 i->i_size = inodep->file_size; 731 i->i_fop = &generic_ro_fops; 732 i->i_mode |= S_IFREG; 733 i->i_blocks = ((i->i_size - 1) >> 9) + 1; 734 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk; 735 SQUASHFS_I(i)->u.s1.fragment_size = frag_size; 736 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset; 737 SQUASHFS_I(i)->start_block = inodep->start_block; 738 SQUASHFS_I(i)->u.s1.block_list_start = next_block; 739 SQUASHFS_I(i)->offset = next_offset; 740 i->i_data.a_ops = &squashfs_aops; 741 742 TRACE("File inode %x:%x, start_block %llx, " 743 "block_list_start %llx, offset %x\n", 744 SQUASHFS_INODE_BLK(inode), offset, 745 inodep->start_block, next_block, 746 next_offset); 747 break; 748 } 749 case SQUASHFS_LREG_TYPE: { 750 unsigned int frag_size; 751 long long frag_blk; 752 struct squashfs_lreg_inode_header *inodep = &id.lreg; 753 struct squashfs_lreg_inode_header *sinodep = &sid.lreg; 754 755 if (msblk->swap) { 756 if (!squashfs_get_cached_block(s, sinodep, block, offset, 757 sizeof(*sinodep), &next_block, &next_offset)) 758 goto failed_read; 759 SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep); 760 } else 761 if (!squashfs_get_cached_block(s, inodep, block, offset, 762 sizeof(*inodep), &next_block, &next_offset)) 763 goto failed_read; 764 765 frag_blk = SQUASHFS_INVALID_BLK; 766 767 if (inodep->fragment != SQUASHFS_INVALID_FRAG) 768 if (!get_fragment_location(s, inodep->fragment, &frag_blk, 769 &frag_size)) 770 goto failed_read; 771 772 i->i_nlink = inodep->nlink; 773 i->i_size = inodep->file_size; 774 i->i_fop = &generic_ro_fops; 775 i->i_mode |= S_IFREG; 776 i->i_blocks = ((i->i_size - 1) >> 9) + 1; 777 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk; 778 SQUASHFS_I(i)->u.s1.fragment_size = frag_size; 779 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset; 780 SQUASHFS_I(i)->start_block = inodep->start_block; 781 SQUASHFS_I(i)->u.s1.block_list_start = next_block; 782 SQUASHFS_I(i)->offset = next_offset; 783 i->i_data.a_ops = &squashfs_aops; 784 785 TRACE("File inode %x:%x, start_block %llx, " 786 "block_list_start %llx, offset %x\n", 787 SQUASHFS_INODE_BLK(inode), offset, 788 inodep->start_block, next_block, 789 next_offset); 790 break; 791 } 792 case SQUASHFS_DIR_TYPE: { 793 struct squashfs_dir_inode_header *inodep = &id.dir; 794 struct squashfs_dir_inode_header *sinodep = &sid.dir; 795 796 if (msblk->swap) { 797 if (!squashfs_get_cached_block(s, sinodep, block, offset, 798 sizeof(*sinodep), &next_block, &next_offset)) 799 goto 
failed_read; 800 SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep); 801 } else 802 if (!squashfs_get_cached_block(s, inodep, block, offset, 803 sizeof(*inodep), &next_block, &next_offset)) 804 goto failed_read; 805 806 i->i_nlink = inodep->nlink; 807 i->i_size = inodep->file_size; 808 i->i_op = &squashfs_dir_inode_ops; 809 i->i_fop = &squashfs_dir_ops; 810 i->i_mode |= S_IFDIR; 811 SQUASHFS_I(i)->start_block = inodep->start_block; 812 SQUASHFS_I(i)->offset = inodep->offset; 813 SQUASHFS_I(i)->u.s2.directory_index_count = 0; 814 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode; 815 816 TRACE("Directory inode %x:%x, start_block %x, offset " 817 "%x\n", SQUASHFS_INODE_BLK(inode), 818 offset, inodep->start_block, 819 inodep->offset); 820 break; 821 } 822 case SQUASHFS_LDIR_TYPE: { 823 struct squashfs_ldir_inode_header *inodep = &id.ldir; 824 struct squashfs_ldir_inode_header *sinodep = &sid.ldir; 825 826 if (msblk->swap) { 827 if (!squashfs_get_cached_block(s, sinodep, block, offset, 828 sizeof(*sinodep), &next_block, &next_offset)) 829 goto failed_read; 830 SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep, sinodep); 831 } else 832 if (!squashfs_get_cached_block(s, inodep, block, offset, 833 sizeof(*inodep), &next_block, &next_offset)) 834 goto failed_read; 835 836 i->i_nlink = inodep->nlink; 837 i->i_size = inodep->file_size; 838 i->i_op = &squashfs_dir_inode_ops; 839 i->i_fop = &squashfs_dir_ops; 840 i->i_mode |= S_IFDIR; 841 SQUASHFS_I(i)->start_block = inodep->start_block; 842 SQUASHFS_I(i)->offset = inodep->offset; 843 SQUASHFS_I(i)->u.s2.directory_index_start = next_block; 844 SQUASHFS_I(i)->u.s2.directory_index_offset = next_offset; 845 SQUASHFS_I(i)->u.s2.directory_index_count = inodep->i_count; 846 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode; 847 848 TRACE("Long directory inode %x:%x, start_block %x, offset %x\n", 849 SQUASHFS_INODE_BLK(inode), offset, 850 inodep->start_block, inodep->offset); 851 break; 852 } 853 case SQUASHFS_SYMLINK_TYPE: { 854 struct squashfs_symlink_inode_header *inodep = &id.symlink; 855 struct squashfs_symlink_inode_header *sinodep = &sid.symlink; 856 857 if (msblk->swap) { 858 if (!squashfs_get_cached_block(s, sinodep, block, offset, 859 sizeof(*sinodep), &next_block, &next_offset)) 860 goto failed_read; 861 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep, sinodep); 862 } else 863 if (!squashfs_get_cached_block(s, inodep, block, offset, 864 sizeof(*inodep), &next_block, &next_offset)) 865 goto failed_read; 866 867 i->i_nlink = inodep->nlink; 868 i->i_size = inodep->symlink_size; 869 i->i_op = &page_symlink_inode_operations; 870 i->i_data.a_ops = &squashfs_symlink_aops; 871 i->i_mode |= S_IFLNK; 872 SQUASHFS_I(i)->start_block = next_block; 873 SQUASHFS_I(i)->offset = next_offset; 874 875 TRACE("Symbolic link inode %x:%x, start_block %llx, offset %x\n", 876 SQUASHFS_INODE_BLK(inode), offset, 877 next_block, next_offset); 878 break; 879 } 880 case SQUASHFS_BLKDEV_TYPE: 881 case SQUASHFS_CHRDEV_TYPE: { 882 struct squashfs_dev_inode_header *inodep = &id.dev; 883 struct squashfs_dev_inode_header *sinodep = &sid.dev; 884 885 if (msblk->swap) { 886 if (!squashfs_get_cached_block(s, sinodep, block, offset, 887 sizeof(*sinodep), &next_block, &next_offset)) 888 goto failed_read; 889 SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep); 890 } else 891 if (!squashfs_get_cached_block(s, inodep, block, offset, 892 sizeof(*inodep), &next_block, &next_offset)) 893 goto failed_read; 894 895 i->i_nlink = inodep->nlink; 896 i->i_mode |= (inodeb->inode_type == SQUASHFS_CHRDEV_TYPE) ? 
897 S_IFCHR : S_IFBLK; 898 init_special_inode(i, i->i_mode, old_decode_dev(inodep->rdev)); 899 900 TRACE("Device inode %x:%x, rdev %x\n", 901 SQUASHFS_INODE_BLK(inode), offset, inodep->rdev); 902 break; 903 } 904 case SQUASHFS_FIFO_TYPE: 905 case SQUASHFS_SOCKET_TYPE: { 906 struct squashfs_ipc_inode_header *inodep = &id.ipc; 907 struct squashfs_ipc_inode_header *sinodep = &sid.ipc; 908 909 if (msblk->swap) { 910 if (!squashfs_get_cached_block(s, sinodep, block, offset, 911 sizeof(*sinodep), &next_block, &next_offset)) 912 goto failed_read; 913 SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep); 914 } else 915 if (!squashfs_get_cached_block(s, inodep, block, offset, 916 sizeof(*inodep), &next_block, &next_offset)) 917 goto failed_read; 918 919 i->i_nlink = inodep->nlink; 920 i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE) 921 ? S_IFIFO : S_IFSOCK; 922 init_special_inode(i, i->i_mode, 0); 923 break; 924 } 925 default: 926 ERROR("Unknown inode type %d in squashfs_iget!\n", 927 inodeb->inode_type); 928 goto failed_read1; 929 } 930 931 return 1; 932 933failed_read: 934 ERROR("Unable to read inode [%llx:%x]\n", block, offset); 935 936failed_read1: 937 make_bad_inode(i); 938 return 0; 939} 940 941 942static int read_inode_lookup_table(struct super_block *s) 943{ 944 struct squashfs_sb_info *msblk = s->s_fs_info; 945 struct squashfs_super_block *sblk = &msblk->sblk; 946 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes); 947 948 TRACE("In read_inode_lookup_table, length %d\n", length); 949 950 /* Allocate inode lookup table */ 951 msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL); 952 if (msblk->inode_lookup_table == NULL) { 953 ERROR("Failed to allocate inode lookup table\n"); 954 return 0; 955 } 956 957 if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table, 958 sblk->lookup_table_start, length | 959 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) { 960 ERROR("unable to read inode lookup table\n"); 961 return 0; 962 } 963 964 if (msblk->swap) { 965 int i; 966 long long block; 967 968 for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) { 969 /* XXX */ 970 SQUASHFS_SWAP_LOOKUP_BLOCKS((&block), 971 &msblk->inode_lookup_table[i], 1); 972 msblk->inode_lookup_table[i] = block; 973 } 974 } 975 976 return 1; 977} 978 979 980static int read_fragment_index_table(struct super_block *s) 981{ 982 struct squashfs_sb_info *msblk = s->s_fs_info; 983 struct squashfs_super_block *sblk = &msblk->sblk; 984 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments); 985 986 if(length == 0) 987 return 1; 988 989 /* Allocate fragment index table */ 990 msblk->fragment_index = kmalloc(length, GFP_KERNEL); 991 if (msblk->fragment_index == NULL) { 992 ERROR("Failed to allocate fragment index table\n"); 993 return 0; 994 } 995 996 if (!squashfs_read_data(s, (char *) msblk->fragment_index, 997 sblk->fragment_table_start, length | 998 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) { 999 ERROR("unable to read fragment index table\n"); 1000 return 0; 1001 } 1002 1003 if (msblk->swap) { 1004 int i; 1005 long long fragment; 1006 1007 for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) { 1008 /* XXX */ 1009 SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment), 1010 &msblk->fragment_index[i], 1); 1011 msblk->fragment_index[i] = fragment; 1012 } 1013 } 1014 1015 return 1; 1016} 1017 1018 1019#if 0 1020static int readahead_metadata(struct super_block *s) 1021{ 1022 struct squashfs_sb_info *msblk = s->s_fs_info; 1023 struct squashfs_super_block *sblk = &msblk->sblk; 1024 long long 
block = sblk->inode_table_start; 1025 int i; 1026 1027 squashfs_cached_blks = 1000; 1028 1029 /* Init inode_table block pointer array */ 1030 msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) * 1031 squashfs_cached_blks, GFP_KERNEL); 1032 if (msblk->block_cache == NULL) { 1033 ERROR("Failed to allocate block cache\n"); 1034 goto failed2; 1035 } 1036 1037 for (i = 0; i < squashfs_cached_blks; i++) 1038 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK; 1039 1040 msblk->next_cache = 0; 1041 msblk->unused_cache_blks = squashfs_cached_blks; 1042 1043 for (i = 0; i < squashfs_cached_blks && block < sblk->fragment_table_start; 1044 i++) { 1045 msblk->block_cache[i].data = vmalloc(SQUASHFS_METADATA_SIZE); 1046 if (msblk->block_cache[i].data == NULL) { 1047 ERROR("Failed to allocate metadata block\n"); 1048 goto failed; 1049 } 1050 1051 msblk->block_cache[i].block = block; 1052 1053 msblk->block_cache[i].length = squashfs_read_data(s, msblk->block_cache[i].data, 1054 block, 0, &block, SQUASHFS_METADATA_SIZE); 1055 1056 if (msblk->block_cache[i].length == 0) 1057 goto failed; 1058 1059 msblk->block_cache[i].next_index = block; 1060 } 1061 1062 return 1; 1063 1064failed: 1065 for(; i >= 0; i --) 1066 vfree(msblk->block_cache[i].data); 1067 1068failed2: 1069 return 0; 1070} 1071#endif 1072static int readahead_metadata(struct super_block *s) 1073{ 1074 struct squashfs_sb_info *msblk = s->s_fs_info; 1075 int i; 1076 1077 squashfs_cached_blks = SQUASHFS_CACHED_BLKS; 1078 1079 /* Init inode_table block pointer array */ 1080 msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) * 1081 squashfs_cached_blks, GFP_KERNEL); 1082 if (msblk->block_cache == NULL) { 1083 ERROR("Failed to allocate block cache\n"); 1084 goto failed; 1085 } 1086 1087 for (i = 0; i < squashfs_cached_blks; i++) 1088 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK; 1089 1090 msblk->next_cache = 0; 1091 msblk->unused_cache_blks = squashfs_cached_blks; 1092 1093 return 1; 1094 1095failed: 1096 return 0; 1097} 1098 1099 1100static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent) 1101{ 1102 struct squashfs_super_block *sblk = &msblk->sblk; 1103 1104 msblk->read_inode = squashfs_read_inode; 1105 msblk->read_blocklist = read_blocklist; 1106 msblk->read_fragment_index_table = read_fragment_index_table; 1107 1108 if (sblk->s_major == 1) { 1109 if (!squashfs_1_0_supported(msblk)) { 1110 SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems " 1111 "are unsupported\n"); 1112 SERROR("Please recompile with Squashfs 1.0 support enabled\n"); 1113 return 0; 1114 } 1115 } else if (sblk->s_major == 2) { 1116 if (!squashfs_2_0_supported(msblk)) { 1117 SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems " 1118 "are unsupported\n"); 1119 SERROR("Please recompile with Squashfs 2.0 support enabled\n"); 1120 return 0; 1121 } 1122 } else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor > 1123 SQUASHFS_MINOR) { 1124 SERROR("Major/Minor mismatch, trying to mount newer %d.%d " 1125 "filesystem\n", sblk->s_major, sblk->s_minor); 1126 SERROR("Please update your kernel\n"); 1127 return 0; 1128 } 1129 1130 return 1; 1131} 1132 1133 1134static int squashfs_fill_super(struct super_block *s, void *data, int silent) 1135{ 1136 struct squashfs_sb_info *msblk; 1137 struct squashfs_super_block *sblk; 1138 int i; 1139 char b[BDEVNAME_SIZE]; 1140 struct inode *root; 1141 1142 TRACE("Entered squashfs_read_superblock\n"); 1143 1144 s->s_fs_info = kzalloc(sizeof(struct squashfs_sb_info), GFP_KERNEL); 1145 if (s->s_fs_info == 
NULL) { 1146 ERROR("Failed to allocate superblock\n"); 1147 goto failure; 1148 } 1149 msblk = s->s_fs_info; 1150 1151 msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()); 1152 if (msblk->stream.workspace == NULL) { 1153 ERROR("Failed to allocate zlib workspace\n"); 1154 goto failure; 1155 } 1156 sblk = &msblk->sblk; 1157 1158 msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE); 1159 msblk->devblksize_log2 = ffz(~msblk->devblksize); 1160 1161 mutex_init(&msblk->read_data_mutex); 1162 mutex_init(&msblk->read_page_mutex); 1163 mutex_init(&msblk->block_cache_mutex); 1164 mutex_init(&msblk->fragment_mutex); 1165 mutex_init(&msblk->meta_index_mutex); 1166 1167 init_waitqueue_head(&msblk->waitq); 1168 init_waitqueue_head(&msblk->fragment_wait_queue); 1169 1170 sblk->bytes_used = sizeof(struct squashfs_super_block); 1171 if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START, 1172 sizeof(struct squashfs_super_block) | 1173 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) { 1174 SERROR("unable to read superblock\n"); 1175 goto failed_mount; 1176 } 1177 1178 /* Check it is a SQUASHFS superblock */ 1179 msblk->swap = 0; 1180 if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) { 1181 if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) { 1182 struct squashfs_super_block ssblk; 1183 1184 WARNING("Mounting a different endian SQUASHFS filesystem on %s\n", 1185 bdevname(s->s_bdev, b)); 1186 1187 SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk); 1188 memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block)); 1189 msblk->swap = 1; 1190 } else { 1191 SERROR("Can't find a SQUASHFS superblock on %s\n", 1192 bdevname(s->s_bdev, b)); 1193 goto failed_mount; 1194 } 1195 } 1196 1197 /* Check the MAJOR & MINOR versions */ 1198 if(!supported_squashfs_filesystem(msblk, silent)) 1199 goto failed_mount; 1200 1201 /* Check the filesystem does not extend beyond the end of the 1202 block device */ 1203 if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode)) 1204 goto failed_mount; 1205 1206 /* Check the root inode for sanity */ 1207 if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE) 1208 goto failed_mount; 1209 1210 TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b)); 1211 TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(sblk->flags) 1212 ? "un" : ""); 1213 TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(sblk->flags) 1214 ? "un" : ""); 1215 TRACE("Check data is %s present in the filesystem\n", 1216 SQUASHFS_CHECK_DATA(sblk->flags) ? 
"" : "not"); 1217 TRACE("Filesystem size %lld bytes\n", sblk->bytes_used); 1218 TRACE("Block size %d\n", sblk->block_size); 1219 TRACE("Number of inodes %d\n", sblk->inodes); 1220 if (sblk->s_major > 1) 1221 TRACE("Number of fragments %d\n", sblk->fragments); 1222 TRACE("Number of uids %d\n", sblk->no_uids); 1223 TRACE("Number of gids %d\n", sblk->no_guids); 1224 TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start); 1225 TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start); 1226 if (sblk->s_major > 1) 1227 TRACE("sblk->fragment_table_start %llx\n", sblk->fragment_table_start); 1228 TRACE("sblk->uid_start %llx\n", sblk->uid_start); 1229 1230 s->s_maxbytes = MAX_LFS_FILESIZE; 1231 s->s_flags |= MS_RDONLY; 1232 s->s_op = &squashfs_super_ops; 1233 1234 if (readahead_metadata(s) == 0) 1235 goto failed_mount; 1236 1237 /* Allocate read_page block */ 1238 msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL); 1239 if (msblk->read_page == NULL) { 1240 ERROR("Failed to allocate read_page block\n"); 1241 goto failed_mount; 1242 } 1243 1244 /* Allocate uid and gid tables */ 1245 msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) * 1246 sizeof(unsigned int), GFP_KERNEL); 1247 if (msblk->uid == NULL) { 1248 ERROR("Failed to allocate uid/gid table\n"); 1249 goto failed_mount; 1250 } 1251 msblk->guid = msblk->uid + sblk->no_uids; 1252 1253 if (msblk->swap) { 1254 unsigned int suid[sblk->no_uids + sblk->no_guids]; 1255 1256 if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start, 1257 ((sblk->no_uids + sblk->no_guids) * 1258 sizeof(unsigned int)) | 1259 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) { 1260 ERROR("unable to read uid/gid table\n"); 1261 goto failed_mount; 1262 } 1263 1264 SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids + 1265 sblk->no_guids), (sizeof(unsigned int) * 8)); 1266 } else 1267 if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start, 1268 ((sblk->no_uids + sblk->no_guids) * 1269 sizeof(unsigned int)) | 1270 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) { 1271 ERROR("unable to read uid/gid table\n"); 1272 goto failed_mount; 1273 } 1274 1275 1276 if (sblk->s_major == 1 && squashfs_1_0_supported(msblk)) 1277 goto allocate_root; 1278 1279 msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) * 1280 SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL); 1281 if (msblk->fragment == NULL) { 1282 ERROR("Failed to allocate fragment block cache\n"); 1283 goto failed_mount; 1284 } 1285 1286 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) { 1287 msblk->fragment[i].locked = 0; 1288 msblk->fragment[i].block = SQUASHFS_INVALID_BLK; 1289 msblk->fragment[i].data = NULL; 1290 } 1291 1292 msblk->next_fragment = 0; 1293 msblk->unused_frag_blks = SQUASHFS_CACHED_FRAGMENTS; 1294 1295 /* Allocate and read fragment index table */ 1296 if (msblk->read_fragment_index_table(s) == 0) 1297 goto failed_mount; 1298 1299 if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK) 1300 goto allocate_root; 1301 1302 /* Allocate and read inode lookup table */ 1303 if (read_inode_lookup_table(s) == 0) 1304 goto failed_mount; 1305 1306 s->s_op = &squashfs_export_super_ops; 1307 s->s_export_op = &squashfs_export_ops; 1308 1309allocate_root: 1310 root = new_inode(s); 1311 if ((msblk->read_inode)(root, sblk->root_inode) == 0) 1312 goto failed_mount; 1313 insert_inode_hash(root); 1314 1315 s->s_root = d_alloc_root(root); 1316 if (s->s_root == NULL) { 1317 
ERROR("Root inode create failed\n"); 1318 iput(root); 1319 goto failed_mount; 1320 } 1321 1322 TRACE("Leaving squashfs_read_super\n"); 1323 return 0; 1324 1325failed_mount: 1326 kfree(msblk->inode_lookup_table); 1327 kfree(msblk->fragment_index); 1328 kfree(msblk->fragment); 1329 kfree(msblk->uid); 1330 kfree(msblk->read_page); 1331 kfree(msblk->block_cache); 1332 kfree(msblk->fragment_index_2); 1333 vfree(msblk->stream.workspace); 1334 kfree(s->s_fs_info); 1335 s->s_fs_info = NULL; 1336 return -EINVAL; 1337 1338failure: 1339 return -ENOMEM; 1340} 1341 1342 1343static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf) 1344{ 1345 struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info; 1346 struct squashfs_super_block *sblk = &msblk->sblk; 1347 1348 TRACE("Entered squashfs_statfs\n"); 1349 1350 buf->f_type = SQUASHFS_MAGIC; 1351 buf->f_bsize = sblk->block_size; 1352 buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1; 1353 buf->f_bfree = buf->f_bavail = 0; 1354 buf->f_files = sblk->inodes; 1355 buf->f_ffree = 0; 1356 buf->f_namelen = SQUASHFS_NAME_LEN; 1357 1358 return 0; 1359} 1360 1361 1362static int squashfs_symlink_readpage(struct file *file, struct page *page) 1363{ 1364 struct inode *inode = page->mapping->host; 1365 int index = page->index << PAGE_CACHE_SHIFT, length, bytes, avail_bytes; 1366 long long block = SQUASHFS_I(inode)->start_block; 1367 int offset = SQUASHFS_I(inode)->offset; 1368 void *pageaddr = kmap(page); 1369 1370 TRACE("Entered squashfs_symlink_readpage, page index %ld, start block " 1371 "%llx, offset %x\n", page->index, 1372 SQUASHFS_I(inode)->start_block, 1373 SQUASHFS_I(inode)->offset); 1374 1375 for (length = 0; length < index; length += bytes) { 1376 bytes = squashfs_get_cached_block(inode->i_sb, NULL, block, 1377 offset, PAGE_CACHE_SIZE, &block, &offset); 1378 if (bytes == 0) { 1379 ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset); 1380 goto skip_read; 1381 } 1382 } 1383 1384 if (length != index) { 1385 ERROR("(squashfs_symlink_readpage) length != index\n"); 1386 bytes = 0; 1387 goto skip_read; 1388 } 1389 1390 avail_bytes = min_t(int, i_size_read(inode) - length, PAGE_CACHE_SIZE); 1391 1392 bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block, offset, 1393 avail_bytes, &block, &offset); 1394 if (bytes == 0) 1395 ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset); 1396 1397skip_read: 1398 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); 1399 kunmap(page); 1400 flush_dcache_page(page); 1401 SetPageUptodate(page); 1402 unlock_page(page); 1403 1404 return 0; 1405} 1406 1407 1408struct meta_index *locate_meta_index(struct inode *inode, int index, int offset) 1409{ 1410 struct meta_index *meta = NULL; 1411 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 1412 int i; 1413 1414 mutex_lock(&msblk->meta_index_mutex); 1415 1416 TRACE("locate_meta_index: index %d, offset %d\n", index, offset); 1417 1418 if (msblk->meta_index == NULL) 1419 goto not_allocated; 1420 1421 for (i = 0; i < SQUASHFS_META_NUMBER; i ++) { 1422 if (msblk->meta_index[i].inode_number == inode->i_ino && 1423 msblk->meta_index[i].offset >= offset && 1424 msblk->meta_index[i].offset <= index && 1425 msblk->meta_index[i].locked == 0) { 1426 TRACE("locate_meta_index: entry %d, offset %d\n", i, 1427 msblk->meta_index[i].offset); 1428 meta = &msblk->meta_index[i]; 1429 offset = meta->offset; 1430 } 1431 } 1432 1433 if (meta) 1434 meta->locked = 1; 1435 1436not_allocated: 1437 mutex_unlock(&msblk->meta_index_mutex); 1438 1439 
return meta; 1440} 1441 1442 1443struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip) 1444{ 1445 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 1446 struct meta_index *meta = NULL; 1447 int i; 1448 1449 mutex_lock(&msblk->meta_index_mutex); 1450 1451 TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip); 1452 1453 if (msblk->meta_index == NULL) { 1454 msblk->meta_index = kmalloc(sizeof(struct meta_index) * 1455 SQUASHFS_META_NUMBER, GFP_KERNEL); 1456 if (msblk->meta_index == NULL) { 1457 ERROR("Failed to allocate meta_index\n"); 1458 goto failed; 1459 } 1460 for (i = 0; i < SQUASHFS_META_NUMBER; i++) { 1461 msblk->meta_index[i].inode_number = 0; 1462 msblk->meta_index[i].locked = 0; 1463 } 1464 msblk->next_meta_index = 0; 1465 } 1466 1467 for (i = SQUASHFS_META_NUMBER; i && 1468 msblk->meta_index[msblk->next_meta_index].locked; i --) 1469 msblk->next_meta_index = (msblk->next_meta_index + 1) % 1470 SQUASHFS_META_NUMBER; 1471 1472 if (i == 0) { 1473 TRACE("empty_meta_index: failed!\n"); 1474 goto failed; 1475 } 1476 1477 TRACE("empty_meta_index: returned meta entry %d, %p\n", 1478 msblk->next_meta_index, 1479 &msblk->meta_index[msblk->next_meta_index]); 1480 1481 meta = &msblk->meta_index[msblk->next_meta_index]; 1482 msblk->next_meta_index = (msblk->next_meta_index + 1) % 1483 SQUASHFS_META_NUMBER; 1484 1485 meta->inode_number = inode->i_ino; 1486 meta->offset = offset; 1487 meta->skip = skip; 1488 meta->entries = 0; 1489 meta->locked = 1; 1490 1491failed: 1492 mutex_unlock(&msblk->meta_index_mutex); 1493 return meta; 1494} 1495 1496 1497void release_meta_index(struct inode *inode, struct meta_index *meta) 1498{ 1499 meta->locked = 0; 1500 smp_mb(); 1501} 1502 1503 1504static int read_block_index(struct super_block *s, int blocks, char *block_list, 1505 long long *start_block, int *offset) 1506{ 1507 struct squashfs_sb_info *msblk = s->s_fs_info; 1508 unsigned int *block_listp; 1509 int block = 0; 1510 1511 if (msblk->swap) { 1512 char sblock_list[blocks << 2]; 1513 1514 if (!squashfs_get_cached_block(s, sblock_list, *start_block, 1515 *offset, blocks << 2, start_block, offset)) { 1516 ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset); 1517 goto failure; 1518 } 1519 SQUASHFS_SWAP_INTS(((unsigned int *)block_list), 1520 ((unsigned int *)sblock_list), blocks); 1521 } else { 1522 if (!squashfs_get_cached_block(s, block_list, *start_block, 1523 *offset, blocks << 2, start_block, offset)) { 1524 ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset); 1525 goto failure; 1526 } 1527 } 1528 1529 for (block_listp = (unsigned int *) block_list; blocks; 1530 block_listp++, blocks --) 1531 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp); 1532 1533 return block; 1534 1535failure: 1536 return -1; 1537} 1538 1539 1540#define SIZE 256 1541 1542static inline int calculate_skip(int blocks) { 1543 int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES); 1544 return skip >= 7 ? 
7 : skip + 1; 1545} 1546 1547 1548static int get_meta_index(struct inode *inode, int index, 1549 long long *index_block, int *index_offset, 1550 long long *data_block, char *block_list) 1551{ 1552 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 1553 struct squashfs_super_block *sblk = &msblk->sblk; 1554 int skip = calculate_skip(i_size_read(inode) >> sblk->block_log); 1555 int offset = 0; 1556 struct meta_index *meta; 1557 struct meta_entry *meta_entry; 1558 long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start; 1559 int cur_offset = SQUASHFS_I(inode)->offset; 1560 long long cur_data_block = SQUASHFS_I(inode)->start_block; 1561 int i; 1562 1563 index /= SQUASHFS_META_INDEXES * skip; 1564 1565 while (offset < index) { 1566 meta = locate_meta_index(inode, index, offset + 1); 1567 1568 if (meta == NULL) { 1569 meta = empty_meta_index(inode, offset + 1, skip); 1570 if (meta == NULL) 1571 goto all_done; 1572 } else { 1573 if(meta->entries == 0) 1574 goto failed; 1575 /* XXX */ 1576 offset = index < meta->offset + meta->entries ? index : 1577 meta->offset + meta->entries - 1; 1578 /* XXX */ 1579 meta_entry = &meta->meta_entry[offset - meta->offset]; 1580 cur_index_block = meta_entry->index_block + sblk->inode_table_start; 1581 cur_offset = meta_entry->offset; 1582 cur_data_block = meta_entry->data_block; 1583 TRACE("get_meta_index: offset %d, meta->offset %d, " 1584 "meta->entries %d\n", offset, meta->offset, meta->entries); 1585 TRACE("get_meta_index: index_block 0x%llx, offset 0x%x" 1586 " data_block 0x%llx\n", cur_index_block, 1587 cur_offset, cur_data_block); 1588 } 1589 1590 for (i = meta->offset + meta->entries; i <= index && 1591 i < meta->offset + SQUASHFS_META_ENTRIES; i++) { 1592 int blocks = skip * SQUASHFS_META_INDEXES; 1593 1594 while (blocks) { 1595 int block = blocks > (SIZE >> 2) ? (SIZE >> 2) : blocks; 1596 int res = read_block_index(inode->i_sb, block, block_list, 1597 &cur_index_block, &cur_offset); 1598 1599 if (res == -1) 1600 goto failed; 1601 1602 cur_data_block += res; 1603 blocks -= block; 1604 } 1605 1606 meta_entry = &meta->meta_entry[i - meta->offset]; 1607 meta_entry->index_block = cur_index_block - sblk->inode_table_start; 1608 meta_entry->offset = cur_offset; 1609 meta_entry->data_block = cur_data_block; 1610 meta->entries ++; 1611 offset ++; 1612 } 1613 1614 TRACE("get_meta_index: meta->offset %d, meta->entries %d\n", 1615 meta->offset, meta->entries); 1616 1617 release_meta_index(inode, meta); 1618 } 1619 1620all_done: 1621 *index_block = cur_index_block; 1622 *index_offset = cur_offset; 1623 *data_block = cur_data_block; 1624 1625 return offset * SQUASHFS_META_INDEXES * skip; 1626 1627failed: 1628 release_meta_index(inode, meta); 1629 return -1; 1630} 1631 1632 1633static long long read_blocklist(struct inode *inode, int index, 1634 int readahead_blks, char *block_list, 1635 unsigned short **block_p, unsigned int *bsize) 1636{ 1637 long long block_ptr; 1638 int offset; 1639 long long block; 1640 int res = get_meta_index(inode, index, &block_ptr, &offset, &block, 1641 block_list); 1642 1643 TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset" 1644 " 0x%x, block 0x%llx\n", res, index, block_ptr, offset, block); 1645 1646 if(res == -1) 1647 goto failure; 1648 1649 index -= res; 1650 1651 while (index) { 1652 int blocks = index > (SIZE >> 2) ? 
(SIZE >> 2) : index; 1653 int res = read_block_index(inode->i_sb, blocks, block_list, 1654 &block_ptr, &offset); 1655 if (res == -1) 1656 goto failure; 1657 block += res; 1658 index -= blocks; 1659 } 1660 1661 if (read_block_index(inode->i_sb, 1, block_list, &block_ptr, &offset) == -1) 1662 goto failure; 1663 *bsize = *((unsigned int *) block_list); 1664 1665 return block; 1666 1667failure: 1668 return 0; 1669} 1670 1671 1672static int squashfs_readpage(struct file *file, struct page *page) 1673{ 1674 struct inode *inode = page->mapping->host; 1675 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 1676 struct squashfs_super_block *sblk = &msblk->sblk; 1677 unsigned char *block_list; 1678 long long block; 1679 unsigned int bsize, i, bytes, byte_offset = 0; 1680 int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT); 1681 void *pageaddr; 1682 struct squashfs_fragment_cache *fragment = NULL; 1683 char *data_ptr = msblk->read_page; 1684 1685 int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1; 1686 int start_index = page->index & ~mask; 1687 int end_index = start_index | mask; 1688 1689 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", 1690 page->index, SQUASHFS_I(inode)->start_block); 1691 1692 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 1693 PAGE_CACHE_SHIFT)) { 1694 block_list = NULL; 1695 goto out; 1696 } 1697 1698 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK 1699 || index < (i_size_read(inode) >> sblk->block_log)) { 1700 block_list = kmalloc(SIZE, GFP_KERNEL); 1701 if (block_list == NULL) { 1702 ERROR("Failed to allocate block_list\n"); 1703 goto error_out; 1704 } 1705 1706 block = (msblk->read_blocklist)(inode, index, 1, block_list, NULL, &bsize); 1707 if (block == 0) 1708 goto error_out; 1709 1710 mutex_lock(&msblk->read_page_mutex); 1711 1712 bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block, bsize, 1713 NULL, sblk->block_size); 1714 1715 if (bytes == 0) { 1716 ERROR("Unable to read page, block %llx, size %x\n", block, bsize); 1717 mutex_unlock(&msblk->read_page_mutex); 1718 goto error_out; 1719 } 1720 } else { 1721 fragment = get_cached_fragment(inode->i_sb, 1722 SQUASHFS_I(inode)-> u.s1.fragment_start_block, 1723 SQUASHFS_I(inode)->u.s1.fragment_size); 1724 1725 if (fragment == NULL) { 1726 ERROR("Unable to read page, block %llx, size %x\n", 1727 SQUASHFS_I(inode)->u.s1.fragment_start_block, 1728 (int) SQUASHFS_I(inode)->u.s1.fragment_size); 1729 block_list = NULL; 1730 goto error_out; 1731 } 1732 bytes = SQUASHFS_I(inode)->u.s1.fragment_offset + 1733 (i_size_read(inode) & (sblk->block_size - 1)); 1734 byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset; 1735 data_ptr = fragment->data; 1736 } 1737 1738 for (i = start_index; i <= end_index && byte_offset < bytes; 1739 i++, byte_offset += PAGE_CACHE_SIZE) { 1740 struct page *push_page; 1741 int avail = min_t(unsigned int, bytes - byte_offset, PAGE_CACHE_SIZE); 1742 1743 TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n", 1744 bytes, i, byte_offset, avail); 1745 1746 push_page = (i == page->index) ? 
page :
				grab_cache_page_nowait(page->mapping, i);

		if (!push_page)
			continue;

		if (PageUptodate(push_page))
			goto skip_page;

		pageaddr = kmap_atomic(push_page, KM_USER0);
		memcpy(pageaddr, data_ptr + byte_offset, avail);
		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
		kunmap_atomic(pageaddr, KM_USER0);
		flush_dcache_page(push_page);
		SetPageUptodate(push_page);
skip_page:
		unlock_page(push_page);
		if (i != page->index)
			page_cache_release(push_page);
	}

	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
			|| index < (i_size_read(inode) >> sblk->block_log)) {
		mutex_unlock(&msblk->read_page_mutex);
		kfree(block_list);
	} else
		release_cached_fragment(msblk, fragment);

	return 0;

error_out:
	SetPageError(page);
out:
	pageaddr = kmap_atomic(page, KM_USER0);
	memset(pageaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(pageaddr, KM_USER0);
	flush_dcache_page(page);
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);

	kfree(block_list);
	return 0;
}


static int get_dir_index_using_offset(struct super_block *s,
				long long *next_block, unsigned int *next_offset,
				long long index_start, unsigned int index_offset, int i_count,
				long long f_pos)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int i, length = 0;
	struct squashfs_dir_index index;

	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
		i_count, (unsigned int) f_pos);

	/* f_pos is offset by 3 to account for the synthetic "." and ".." entries */
	f_pos -= 3;
	if (f_pos == 0)
		goto finish;

	for (i = 0; i < i_count; i++) {
		if (msblk->swap) {
			struct squashfs_dir_index sindex;
			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
				sizeof(sindex), &index_start, &index_offset);
			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
		} else
			squashfs_get_cached_block(s, &index, index_start, index_offset,
				sizeof(index), &index_start, &index_offset);

		if (index.index > f_pos)
			break;

		squashfs_get_cached_block(s, NULL, index_start, index_offset,
			index.size + 1, &index_start, &index_offset);

		length = index.index;
		*next_block = index.start_block + sblk->directory_table_start;
	}

	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;

finish:
	return length + 3;
}


static int get_dir_index_using_name(struct super_block *s,
				long long *next_block, unsigned int *next_offset,
				long long index_start, unsigned int index_offset, int i_count,
				const char *name, int size)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int i, length = 0;
	struct squashfs_dir_index *index;
	char *str;

	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);

	str = kmalloc(sizeof(struct squashfs_dir_index) +
		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL);
	if (str == NULL) {
		ERROR("Failed to allocate squashfs_dir_index\n");
		goto failure;
	}

	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
	strncpy(str, name, size);
	str[size] = '\0';

	for (i = 0; i < i_count; i++) {
		if (msblk->swap) {
			struct squashfs_dir_index sindex;
			squashfs_get_cached_block(s, &sindex, index_start,
static int get_dir_index_using_offset(struct super_block *s,
				long long *next_block, unsigned int *next_offset,
				long long index_start, unsigned int index_offset, int i_count,
				long long f_pos)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int i, length = 0;
	struct squashfs_dir_index index;

	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
					i_count, (unsigned int) f_pos);

	f_pos -= 3;
	if (f_pos == 0)
		goto finish;

	for (i = 0; i < i_count; i++) {
		if (msblk->swap) {
			struct squashfs_dir_index sindex;
			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
					sizeof(sindex), &index_start, &index_offset);
			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
		} else
			squashfs_get_cached_block(s, &index, index_start, index_offset,
					sizeof(index), &index_start, &index_offset);

		if (index.index > f_pos)
			break;

		squashfs_get_cached_block(s, NULL, index_start, index_offset,
					index.size + 1, &index_start, &index_offset);

		length = index.index;
		*next_block = index.start_block + sblk->directory_table_start;
	}

	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;

finish:
	return length + 3;
}


static int get_dir_index_using_name(struct super_block *s,
				long long *next_block, unsigned int *next_offset,
				long long index_start, unsigned int index_offset, int i_count,
				const char *name, int size)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int i, length = 0;
	struct squashfs_dir_index *index;
	char *str;

	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);

	str = kmalloc(sizeof(struct squashfs_dir_index) +
		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL);
	if (str == NULL) {
		ERROR("Failed to allocate squashfs_dir_index\n");
		goto failure;
	}

	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
	strncpy(str, name, size);
	str[size] = '\0';

	for (i = 0; i < i_count; i++) {
		if (msblk->swap) {
			struct squashfs_dir_index sindex;
			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
					sizeof(sindex), &index_start, &index_offset);
			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
		} else
			squashfs_get_cached_block(s, index, index_start, index_offset,
					sizeof(struct squashfs_dir_index), &index_start, &index_offset);

		squashfs_get_cached_block(s, index->name, index_start, index_offset,
					index->size + 1, &index_start, &index_offset);

		index->name[index->size + 1] = '\0';

		if (strcmp(index->name, str) > 0)
			break;

		length = index->index;
		*next_block = index->start_block + sblk->directory_table_start;
	}

	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
	kfree(str);

failure:
	return length + 3;
}

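/*
 * readdir: after inventing "." and ".." (which are not stored on disk),
 * walk the directory as a sequence of squashfs_dir_header records, each
 * followed by count + 1 squashfs_dir_entry records, handing every name
 * to filldir().  Entries already consumed (file->f_pos >= length) are
 * skipped, and on an opposite-endian image each structure is
 * byte-swapped first (msblk->swap).
 */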
static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *i = file->f_dentry->d_inode;
	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	long long next_block = SQUASHFS_I(i)->start_block +
		sblk->directory_table_start;
	int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count;
	struct squashfs_dir_header dirh;
	struct squashfs_dir_entry *dire;

	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);

	dire = kmalloc(sizeof(struct squashfs_dir_entry) +
		SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
	if (dire == NULL) {
		ERROR("Failed to allocate squashfs_dir_entry\n");
		goto finish;
	}

	while (file->f_pos < 3) {
		char *name;
		int size, i_ino;

		if (file->f_pos == 0) {
			name = ".";
			size = 1;
			i_ino = i->i_ino;
		} else {
			name = "..";
			size = 2;
			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
		}
		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
				(unsigned int) dirent, name, size, (int)
				file->f_pos, i_ino, squashfs_filetype_table[1]);

		if (filldir(dirent, name, size, file->f_pos, i_ino,
				squashfs_filetype_table[1]) < 0) {
			TRACE("Filldir returned less than 0\n");
			goto finish;
		}
		file->f_pos += size;
	}

	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
				SQUASHFS_I(i)->u.s2.directory_index_start,
				SQUASHFS_I(i)->u.s2.directory_index_offset,
				SQUASHFS_I(i)->u.s2.directory_index_count, file->f_pos);

	while (length < i_size_read(i)) {
		/* read directory header */
		if (msblk->swap) {
			struct squashfs_dir_header sdirh;

			if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block,
					next_offset, sizeof(sdirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(sdirh);
			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
		} else {
			if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block,
					next_offset, sizeof(dirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(dirh);
		}

		dir_count = dirh.count + 1;
		while (dir_count--) {
			if (msblk->swap) {
				struct squashfs_dir_entry sdire;
				if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block,
						next_offset, sizeof(sdire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(sdire);
				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
			} else {
				if (!squashfs_get_cached_block(i->i_sb, dire, next_block,
						next_offset, sizeof(*dire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(*dire);
			}

			if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block,
					next_offset, dire->size + 1, &next_block, &next_offset))
				goto failed_read;

			length += dire->size + 1;

			if (file->f_pos >= length)
				continue;

			dire->name[dire->size + 1] = '\0';

			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
					(unsigned int) dirent, dire->name, dire->size + 1,
					(int) file->f_pos, dirh.start_block, dire->offset,
					dirh.inode_number + dire->inode_number,
					squashfs_filetype_table[dire->type]);

			if (filldir(dirent, dire->name, dire->size + 1, file->f_pos,
					dirh.inode_number + dire->inode_number,
					squashfs_filetype_table[dire->type]) < 0) {
				TRACE("Filldir returned less than 0\n");
				goto finish;
			}
			file->f_pos = length;
		}
	}

finish:
	kfree(dire);
	return 0;

failed_read:
	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
		next_offset);
	kfree(dire);
	return 0;
}

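/*
 * lookup: scan the directory for dentry->d_name, using the name index
 * to get close first.  Directory entries are stored sorted, which is
 * what allows the early exit once the wanted name sorts before the
 * current entry.  On a match the entry's (start_block, offset) pair is
 * packed into a squashfs inode number with SQUASHFS_MKINODE() and
 * handed to squashfs_iget().
 */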
static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
				struct nameidata *nd)
{
	const unsigned char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	struct inode *inode = NULL;
	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	long long next_block = SQUASHFS_I(i)->start_block +
				sblk->directory_table_start;
	int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count;
	struct squashfs_dir_header dirh;
	struct squashfs_dir_entry *dire;

	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);

	dire = kmalloc(sizeof(struct squashfs_dir_entry) +
		SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
	if (dire == NULL) {
		ERROR("Failed to allocate squashfs_dir_entry\n");
		goto exit_lookup;
	}

	if (len > SQUASHFS_NAME_LEN)
		goto exit_lookup;

	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
				SQUASHFS_I(i)->u.s2.directory_index_start,
				SQUASHFS_I(i)->u.s2.directory_index_offset,
				SQUASHFS_I(i)->u.s2.directory_index_count, name, len);

	while (length < i_size_read(i)) {
		/* read directory header */
		if (msblk->swap) {
			struct squashfs_dir_header sdirh;
			if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block,
					next_offset, sizeof(sdirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(sdirh);
			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
		} else {
			if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block,
					next_offset, sizeof(dirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(dirh);
		}

		dir_count = dirh.count + 1;
		while (dir_count--) {
			if (msblk->swap) {
				struct squashfs_dir_entry sdire;
				if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block,
						next_offset, sizeof(sdire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(sdire);
				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
			} else {
				if (!squashfs_get_cached_block(i->i_sb, dire, next_block,
						next_offset, sizeof(*dire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(*dire);
			}

			if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block,
					next_offset, dire->size + 1, &next_block, &next_offset))
				goto failed_read;

			length += dire->size + 1;

			if (name[0] < dire->name[0])
				goto exit_lookup;

			if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
				squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
						dire->offset);

				TRACE("calling squashfs_iget for directory entry %s, inode"
					" %x:%x, %d\n", name, dirh.start_block, dire->offset,
					dirh.inode_number + dire->inode_number);

				inode = squashfs_iget(i->i_sb, ino,
					dirh.inode_number + dire->inode_number);

				goto exit_lookup;
			}
		}
	}

exit_lookup:
	kfree(dire);
	if (inode)
		return d_splice_alias(inode, dentry);
	d_add(dentry, inode);
	return ERR_PTR(0);

failed_read:
	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
		next_offset);
	goto exit_lookup;
}


static int squashfs_remount(struct super_block *s, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}


static void squashfs_put_super(struct super_block *s)
{
	int i;

	if (s->s_fs_info) {
		struct squashfs_sb_info *sbi = s->s_fs_info;
		if (sbi->block_cache)
			for (i = 0; i < squashfs_cached_blks; i++)
				if (sbi->block_cache[i].block != SQUASHFS_INVALID_BLK)
					vfree(sbi->block_cache[i].data);
		if (sbi->fragment)
			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
				vfree(sbi->fragment[i].data);
		kfree(sbi->fragment);
		kfree(sbi->block_cache);
		kfree(sbi->read_page);
		kfree(sbi->uid);
		kfree(sbi->fragment_index);
		kfree(sbi->fragment_index_2);
		kfree(sbi->meta_index);
		vfree(sbi->stream.workspace);
		kfree(s->s_fs_info);
		s->s_fs_info = NULL;
	}
}


static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
				const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
				mnt);
}


static int __init init_squashfs_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out;

	printk(KERN_INFO "squashfs: version 3.2-r2-CVS (2007/07/17) "
		"Phillip Lougher\n");

	err = register_filesystem(&squashfs_fs_type);
	if (err)
		destroy_inodecache();

out:
	return err;
}


static void __exit exit_squashfs_fs(void)
{
	unregister_filesystem(&squashfs_fs_type);
	destroy_inodecache();
}

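/*
 * Inode cache plumbing: struct squashfs_inode_info embeds the VFS
 * inode, so alloc_inode/destroy_inode simply hand objects to and from a
 * dedicated kmem_cache, with init_once() initialising the embedded
 * inode once per slab object.
 */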
static struct kmem_cache *squashfs_inode_cachep;


static struct inode *squashfs_alloc_inode(struct super_block *sb)
{
	struct squashfs_inode_info *ei;
	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
	return ei ? &ei->vfs_inode : NULL;
}


static void squashfs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
}


static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct squashfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}


static int __init init_inodecache(void)
{
	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
		sizeof(struct squashfs_inode_info), 0,
		SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once, NULL);
	if (squashfs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}


static void destroy_inodecache(void)
{
	kmem_cache_destroy(squashfs_inode_cachep);
}


module_init(init_squashfs_fs);
module_exit(exit_squashfs_fs);
MODULE_DESCRIPTION("squashfs 3.2-r2-CVS, a compressed read-only filesystem");
MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
MODULE_LICENSE("GPL");