/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
 * access the metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression) the read of a particular
 * piece of metadata or fragment will retrieve other metadata/fragments which
 * have been packed with it.  Because of locality-of-reference these may be
 * read in the near future, and temporarily caching them ensures they are
 * available for near future access without requiring an additional read and
 * decompress.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zlib.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		for (i = 0; i < cache->entries; i++)
			if (cache->entry[i].block == block)
				break;

		if (i == cache->entries) {
			/*
			 * Block not in cache.  If all cache entries are used,
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

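			/*
			 * The cache lock is not held while the block is read
			 * and decompressed, as squashfs_read_data() may sleep.
			 * entry->pending makes concurrent readers of this
			 * block sleep until it has been filled in.
			 */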
			entry->length = squashfs_read_data(sb, entry->data,
				block, length, &entry->next_index,
				cache->block_size);

			spin_lock(&cache->lock);

			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While we were filling this entry, one or more other
			 * processes may have looked it up in the cache and
			 * slept waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it.  If it was
		 * previously unused there is now one less cache entry
		 * available for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another
		 * process, go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}


/*
 * Release cache entry; once usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there are any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}

/*
 * Delete cache reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
	int i, j;

	if (cache == NULL)
		return;

	for (i = 0; i < cache->entries; i++) {
		if (cache->entry[i].data) {
			for (j = 0; j < cache->pages; j++)
				kfree(cache->entry[i].data[j]);
			kfree(cache->entry[i].data);
		}
	}

	kfree(cache->entry);
	kfree(cache);
}


/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		return NULL;
	}

	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
	if (cache->entry == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		goto cleanup;
	}

	cache->next_blk = 0;
	cache->unused = entries;
	cache->entries = entries;
	cache->block_size = block_size;
	cache->pages = block_size >> PAGE_CACHE_SHIFT;
	cache->name = name;
	cache->num_waiters = 0;
	spin_lock_init(&cache->lock);
	init_waitqueue_head(&cache->wait_queue);

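	/*
	 * Allocate and initialise each cache entry, including its array of
	 * PAGE_CACHE_SIZE sized data buffers.
	 */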
	for (i = 0; i < entries; i++) {
		struct squashfs_cache_entry *entry = &cache->entry[i];

		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		entry->block = SQUASHFS_INVALID_BLK;
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}
	}

	return cache;

cleanup:
	squashfs_cache_delete(cache);
	return NULL;
}

/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If there aren't length bytes then copy
 * the number of bytes available.  In all cases return the number of bytes
 * copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
		int offset, int length)
{
	int remaining = length;

	if (length == 0)
		return 0;
	else if (buffer == NULL)
		return min(length, entry->length - offset);

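	/*
	 * The entry's data is held in a series of PAGE_CACHE_SIZE sized
	 * buffers, so the copy is done a buffer at a time.
	 */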
	while (offset < entry->length) {
		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
				+ (offset % PAGE_CACHE_SIZE);
		int bytes = min_t(int, entry->length - offset,
				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));

		if (bytes >= remaining) {
			memcpy(buffer, buff, remaining);
			remaining = 0;
			break;
		}

		memcpy(buffer, buff, bytes);
		buffer += bytes;
		remaining -= bytes;
		offset += bytes;
	}

	return length - remaining;
}


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
		u64 *block, int *offset, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int bytes, copied = length;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

	while (length) {
		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
		if (entry->error)
			return entry->error;
		else if (*offset >= entry->length)
			return -EIO;

		bytes = squashfs_copy_data(buffer, entry, *offset, length);
		if (buffer)
			buffer += bytes;
		length -= bytes;
		*offset += bytes;

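		/*
		 * Reached the end of this metadata block; continue from the
		 * start of the next block in the filesystem.
		 */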
		if (*offset == entry->length) {
			*block = entry->next_index;
			*offset = 0;
		}

		squashfs_cache_put(entry);
	}

	return copied;
}

/*
 * Look-up in the fragment cache the fragment located at <start_block> in the
 * filesystem.  If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
		length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
	int length)
{
	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int i, res;
	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

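	/*
	 * The table may span several pages; build an array of buffer
	 * pointers, one per PAGE_CACHE_SIZE buffer, for squashfs_read_data()
	 * to fill.
	 */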
	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
		data[i] = buffer;
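	/*
	 * Filesystem tables are stored uncompressed; setting
	 * SQUASHFS_COMPRESSED_BIT_BLOCK in the passed length tells
	 * squashfs_read_data() not to try to decompress the data.
	 */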
	res = squashfs_read_data(sb, data, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
	kfree(data);
	return res;
}