Lines Matching refs:cache

21  * cache.c
28 * This file implements a generic cache implementation used for both caches,
29 * plus functions layered on top of the generic cache implementation to
32 * To avoid out of memory and fragmentation issues with vmalloc the cache
35 * It should be noted that the cache is not used for file datablocks, these
36 * are decompressed and cached in the page-cache in the normal way. The
37 * cache is only used to temporarily cache fragment and metadata blocks
63 * Look-up block in cache, and increment usage count. If not in cache, read
67 struct squashfs_cache *cache, u64 block, int length)
72 spin_lock(&cache->lock);
75 for (i = 0; i < cache->entries; i++)
76 if (cache->entry[i].block == block)
79 if (i == cache->entries) {
81 * Block not in cache, if all cache entries are used
84 if (cache->unused == 0) {
85 cache->num_waiters++;
86 spin_unlock(&cache->lock);
87 wait_event(cache->wait_queue, cache->unused);
88 spin_lock(&cache->lock);
89 cache->num_waiters--;
94 * At least one unused cache entry. A simple
96 * be evicted from the cache.
98 i = cache->next_blk;
99 for (n = 0; n < cache->entries; n++) {
100 if (cache->entry[i].refcount == 0)
102 i = (i + 1) % cache->entries;
105 cache->next_blk = (i + 1) % cache->entries;
106 entry = &cache->entry[i];
109 * Initialise chosen cache entry, and fill it in from
112 cache->unused--;
118 spin_unlock(&cache->lock);
122 cache->block_size);
124 spin_lock(&cache->lock);
133 * have looked it up in the cache, and have slept
137 spin_unlock(&cache->lock);
140 spin_unlock(&cache->lock);
146 * Block already in cache. Increment refcount so it doesn't
148 * previously unused there's one less cache entry available
151 entry = &cache->entry[i];
153 cache->unused--;
162 spin_unlock(&cache->lock);
165 spin_unlock(&cache->lock);
172 cache->name, i, entry->block, entry->refcount, entry->error);
175 ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
182 * Release cache entry, once usage count is zero it can be reused.
186 struct squashfs_cache *cache = entry->cache;
188 spin_lock(&cache->lock);
191 cache->unused++;
196 if (cache->num_waiters) {
197 spin_unlock(&cache->lock);
198 wake_up(&cache->wait_queue);
202 spin_unlock(&cache->lock);
206 * Delete cache reclaiming all kmalloced buffers.
208 void squashfs_cache_delete(struct squashfs_cache *cache)
212 if (cache == NULL)
215 for (i = 0; i < cache->entries; i++) {
216 if (cache->entry[i].data) {
217 for (j = 0; j < cache->pages; j++)
218 kfree(cache->entry[i].data[j]);
219 kfree(cache->entry[i].data);
223 kfree(cache->entry);
224 kfree(cache);
229 * Initialise cache allocating the specified number of entries, each of
237 struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
239 if (cache == NULL) {
240 ERROR("Failed to allocate %s cache\n", name);
244 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
245 if (cache->entry == NULL) {
246 ERROR("Failed to allocate %s cache\n", name);
250 cache->next_blk = 0;
251 cache->unused = entries;
252 cache->entries = entries;
253 cache->block_size = block_size;
254 cache->pages = block_size >> PAGE_CACHE_SHIFT;
255 cache->name = name;
256 cache->num_waiters = 0;
257 spin_lock_init(&cache->lock);
258 init_waitqueue_head(&cache->wait_queue);
261 struct squashfs_cache_entry *entry = &cache->entry[i];
263 init_waitqueue_head(&cache->entry[i].wait_queue);
264 entry->cache = cache;
266 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
268 ERROR("Failed to allocate %s cache entry\n", name);
272 for (j = 0; j < cache->pages; j++) {
281 return cache;
284 squashfs_cache_delete(cache);
290 * Copy up to length bytes from cache entry to buffer starting at offset bytes
291 * into the cache entry. If there's not length bytes then copy the number of
367 * Look-up in the fragment cache the fragment located at <start_block> in the
382 * filesystem. The cache is used here to avoid duplicating locking and