/* cache.c revision a3e28ecaf227d2c03ec6724dd718e3c5a12b74ef */
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */
23
/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
 * access the metadata and fragment caches.
 */
32
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zlib.h>
#include <linux/squashfs_fs.h>
#include <linux/squashfs_fs_sb.h>
#include <linux/squashfs_fs_i.h>

#include "squashfs.h"
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 *
 * The returned entry is locked (usage count incremented); the caller must
 * release it with squashfs_cache_put() when finished.  On read failure the
 * entry is still returned, with entry->error set — callers check that flag.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *s,
	struct squashfs_cache *cache, long long block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		/* Linear scan for the block; caches are small. */
		for (i = 0; i < cache->entries; i++)
			if (cache->entry[i].block == block)
				break;

		if (i == cache->entries) {
			/*
			 * Block not in cache, if all cache entries are locked
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->waiting++;
				spin_unlock(&cache->lock);
				/*
				 * NOTE(review): cache->unused is read here
				 * without the lock; wait_event re-tests it and
				 * the loop re-checks everything under the lock
				 * after waking, so a stale read only costs an
				 * extra iteration — confirm this is intended.
				 */
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->waiting--;
				continue;
			}

			/*
			 * At least one unlocked cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].locked == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.  pending = 1 marks the entry as being filled
			 * so concurrent lookups of the same block sleep on
			 * entry->wait_queue instead of using stale data.
			 */
			cache->unused--;
			entry->block = block;
			entry->locked = 1;
			entry->pending = 1;
			entry->waiting = 0;
			entry->error = 0;
			/* Drop the lock across the (slow) disk read. */
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(s, entry->data,
				block, length, &entry->next_index,
				cache->block_size);

			spin_lock(&cache->lock);

			/* squashfs_read_data() returns 0 on failure. */
			if (entry->length == 0)
				entry->error = 1;

			entry->pending = 0;
			spin_unlock(&cache->lock);

			/*
			 * While filling this entry one or more other processes
			 * have looked it up in the cache, and have slept
			 * waiting for it to become available.
			 */
			if (entry->waiting)
				wake_up_all(&entry->wait_queue);
			goto out;
		}

		/*
		 * Block already in cache.  Increment lock so it doesn't
		 * get reused until we're finished with it, if it was
		 * previously unlocked there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->locked == 0)
			cache->unused--;
		entry->locked++;

		/*
		 * If the entry is currently being filled in by another process
		 * go to sleep waiting for it to become available.  Our lock
		 * reference taken above keeps it from being evicted meanwhile.
		 */
		if (entry->pending) {
			entry->waiting++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
			goto out;
		}

		spin_unlock(&cache->lock);
		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, locked %d, error %d\n", cache->name,
		i, entry->block, entry->locked, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}
162
163
164/*
165 * Release block, once usage count is zero it can be reused.
166 */
167void squashfs_cache_put(struct squashfs_cache *cache,
168				struct squashfs_cache_entry *entry)
169{
170	spin_lock(&cache->lock);
171	entry->locked--;
172	if (entry->locked == 0) {
173		cache->unused++;
174		spin_unlock(&cache->lock);
175		/*
176 		 * If there's any processes waiting for a block to become
177 		 * available, wake one up.
178 		 */
179		if (cache->waiting)
180			wake_up(&cache->wait_queue);
181	} else {
182		spin_unlock(&cache->lock);
183	}
184}
185
186
187void squashfs_cache_delete(struct squashfs_cache *cache)
188{
189	int i;
190
191	if (cache == NULL)
192		return;
193
194	for (i = 0; i < cache->entries; i++)
195		if (cache->entry[i].data) {
196			if (cache->use_vmalloc)
197				vfree(cache->entry[i].data);
198			else
199				kfree(cache->entry[i].data);
200		}
201
202	kfree(cache);
203}
204
205
206struct squashfs_cache *squashfs_cache_init(char *name, int entries,
207	int block_size, int use_vmalloc)
208{
209	int i;
210	struct squashfs_cache *cache = kzalloc(sizeof(*cache) + entries *
211			sizeof(*(cache->entry)), GFP_KERNEL);
212
213	if (cache == NULL) {
214		ERROR("Failed to allocate %s cache\n", name);
215		goto failed;
216	}
217
218	cache->next_blk = 0;
219	cache->unused = entries;
220	cache->entries = entries;
221	cache->block_size = block_size;
222	cache->use_vmalloc = use_vmalloc;
223	cache->name = name;
224	cache->waiting = 0;
225	spin_lock_init(&cache->lock);
226	init_waitqueue_head(&cache->wait_queue);
227
228	for (i = 0; i < entries; i++) {
229		init_waitqueue_head(&cache->entry[i].wait_queue);
230		cache->entry[i].block = SQUASHFS_INVALID_BLK;
231		cache->entry[i].data = use_vmalloc ? vmalloc(block_size) :
232				kmalloc(block_size, GFP_KERNEL);
233		if (cache->entry[i].data == NULL) {
234			ERROR("Failed to allocate %s cache entry\n", name);
235			goto cleanup;
236		}
237	}
238
239	return cache;
240
241cleanup:
242	squashfs_cache_delete(cache);
243failed:
244	return NULL;
245}
246
247
248/*
249 * Read length bytes from metadata position <block, offset> (block is the
250 * start of the compressed block on disk, and offset is the offset into
251 * the block once decompressed).  Data is packed into consecutive blocks,
252 * and length bytes may require reading more than one block.
253 */
254int squashfs_read_metadata(struct super_block *s, void *buffer,
255				long long block, unsigned int offset,
256				int length, long long *next_block,
257				unsigned int *next_offset)
258{
259	struct squashfs_sb_info *msblk = s->s_fs_info;
260	int bytes, return_length = length;
261	struct squashfs_cache_entry *entry;
262
263	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", block, offset);
264
265	while (1) {
266		entry = squashfs_cache_get(s, msblk->block_cache, block, 0);
267		bytes = entry->length - offset;
268
269		if (entry->error || bytes < 1) {
270			return_length = 0;
271			goto finish;
272		} else if (bytes >= length) {
273			if (buffer)
274				memcpy(buffer, entry->data + offset, length);
275			if (entry->length - offset == length) {
276				*next_block = entry->next_index;
277				*next_offset = 0;
278			} else {
279				*next_block = block;
280				*next_offset = offset + length;
281			}
282			goto finish;
283		} else {
284			if (buffer) {
285				memcpy(buffer, entry->data + offset, bytes);
286				buffer += bytes;
287			}
288			block = entry->next_index;
289			squashfs_cache_put(msblk->block_cache, entry);
290			length -= bytes;
291			offset = 0;
292		}
293	}
294
295finish:
296	squashfs_cache_put(msblk->block_cache, entry);
297	return return_length;
298}
299
300
301struct squashfs_cache_entry *get_cached_fragment(struct super_block *s,
302				long long start_block, int length)
303{
304	struct squashfs_sb_info *msblk = s->s_fs_info;
305
306	return squashfs_cache_get(s, msblk->fragment_cache, start_block,
307		length);
308}
309
310
/*
 * Release a fragment cache entry obtained from get_cached_fragment(),
 * dropping its usage count so the cache slot can eventually be reused.
 */
void release_cached_fragment(struct squashfs_sb_info *msblk,
				struct squashfs_cache_entry *fragment)
{
	squashfs_cache_put(msblk->fragment_cache, fragment);
}
316
317