Lines Matching refs:cache

34 * This file implements a simple static state cache for 965.  The
38 * the cache may not have relocations (pointers to other BOs) in them.
43 * Replacement is not implemented. Instead, when the cache gets too
44 * big we throw out all of the cache data and let it get regenerated.
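The header comment above is the whole design in three sentences: a bucketed hash table whose entries point at immutable, relocation-free data, with wholesale clearing instead of per-item eviction. Below is a minimal sketch of that shape in plain C; the toy_cache/toy_item names and fields are illustrative stand-ins, not the driver's actual structures.

#include <stdint.h>
#include <stdlib.h>

struct toy_item {
   uint32_t hash;            /* hash of the key data */
   uint32_t offset;          /* where the data lives in the backing buffer */
   uint32_t size;            /* size of the data in bytes */
   struct toy_item *next;    /* bucket chain */
};

struct toy_cache {
   struct toy_item **buckets;   /* calloc'd array of bucket heads */
   uint32_t size;               /* number of buckets */
   uint32_t n_items;
   void *buf;                   /* backing store standing in for cache->bo */
   uint32_t buf_size;
   uint32_t next_offset;        /* first unused byte in buf */
};

/* "Replacement" in the toy model, as in the driver: drop everything
 * and let callers regenerate state on their next lookup miss. */
static void
toy_cache_clear(struct toy_cache *c)
{
   for (uint32_t i = 0; i < c->size; i++) {
      struct toy_item *item, *next;
      for (item = c->buckets[i]; item; item = next) {
         next = item->next;
         free(item);
      }
      c->buckets[i] = NULL;
   }
   c->n_items = 0;
   c->next_offset = 0;
}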
82 search_cache(struct brw_cache *cache, GLuint hash,
90 for (c = cache->items[hash % cache->size]; c; c = c->next)
93 fprintf(stderr, "bucket %u/%u = %d/%u items\n", hash % cache->size,
94 cache->size, bucketcount, cache->n_items);
97 for (c = cache->items[hash % cache->size]; c; c = c->next) {
107 rehash(struct brw_cache *cache)
113 size = cache->size * 3;
116 for (i = 0; i < cache->size; i++)
117 for (c = cache->items[i]; c; c = next) {
123 FREE(cache->items);
124 cache->items = items;
125 cache->size = size;
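The rehash above grows the bucket array threefold and re-links every existing item. The same step in the toy model, reusing the toy_cache types from the sketch under the header comment:

static void
toy_cache_rehash(struct toy_cache *c)
{
   uint32_t new_size = c->size * 3;
   struct toy_item **buckets = calloc(new_size, sizeof(*buckets));

   /* Items carry their stored hash, so re-bucketing is just re-linking. */
   for (uint32_t i = 0; i < c->size; i++) {
      struct toy_item *item, *next;
      for (item = c->buckets[i]; item; item = next) {
         next = item->next;
         item->next = buckets[item->hash % new_size];
         buckets[item->hash % new_size] = item;
      }
   }

   free(c->buckets);
   c->buckets = buckets;
   c->size = new_size;
}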
133 brw_search_cache(struct brw_cache *cache,
138 struct brw_context *brw = cache->brw;
149 item = search_cache(cache, hash, &lookup);
157 brw->state.dirty.cache |= (1 << cache_id);
165 brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
167 struct brw_context *brw = cache->brw;
171 new_bo = drm_intel_bo_alloc(intel->bufmgr, "program cache", new_size, 64);
174 if (cache->next_offset != 0) {
175 drm_intel_bo_map(cache->bo, false);
176 drm_intel_bo_subdata(new_bo, 0, cache->next_offset, cache->bo->virtual);
177 drm_intel_bo_unmap(cache->bo);
180 drm_intel_bo_unreference(cache->bo);
181 cache->bo = new_bo;
182 cache->bo_used_by_gpu = false;
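brw_cache_new_bo() allocates a larger buffer object, copies everything written so far via map/subdata, and swaps it in, so existing item offsets stay valid. Here is the same grow-and-copy pattern on the toy model's plain malloc'd buffer, leaving out the GPU-synchronization concern that bo_used_by_gpu tracks:

#include <string.h>

static void
toy_cache_new_buf(struct toy_cache *c, uint32_t new_size)
{
   void *new_buf = malloc(new_size);

   /* Carry over everything written so far; item offsets stay valid. */
   if (c->next_offset != 0)
      memcpy(new_buf, c->buf, c->next_offset);

   free(c->buf);
   c->buf = new_buf;
   c->buf_size = new_size;
}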
191 * Attempts to find an item in the cache with identical data and aux
195 brw_try_upload_using_copy(struct brw_cache *cache,
203 for (i = 0; i < cache->size; i++) {
204 for (item = cache->items[i]; item; item = item->next) {
218 drm_intel_bo_map(cache->bo, false);
219 ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
220 drm_intel_bo_unmap(cache->bo);
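The copy search above walks every bucket and memcmp()s candidate items against the new data, so identical programs end up sharing a single copy in the BO. A toy-model version of the scan (the aux-data comparison the real function also performs is omitted):

#include <stdbool.h>

static bool
toy_cache_find_copy(struct toy_cache *c, const void *data, uint32_t size,
                    uint32_t *offset_out)
{
   for (uint32_t i = 0; i < c->size; i++) {
      for (struct toy_item *item = c->buckets[i]; item; item = item->next) {
         if (item->size != size)
            continue;
         if (memcmp((char *)c->buf + item->offset, data, size) == 0) {
            *offset_out = item->offset;  /* reuse the existing bytes */
            return true;
         }
      }
   }
   return false;
}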
234 brw_upload_item_data(struct brw_cache *cache,
238 /* Allocate space in the cache BO for our new program. */
239 if (cache->next_offset + item->size > cache->bo->size) {
240 uint32_t new_size = cache->bo->size * 2;
242 while (cache->next_offset + item->size > new_size)
245 brw_cache_new_bo(cache, new_size);
251 if (cache->bo_used_by_gpu) {
252 brw_cache_new_bo(cache, cache->bo->size);
255 item->offset = cache->next_offset;
258 cache->next_offset = ALIGN(item->offset + item->size, 64);
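brw_upload_item_data() doubles the BO until the new item fits and hands out a 64-byte-aligned offset; 64 is the same alignment the excerpt passes to drm_intel_bo_alloc. The toy equivalent:

static uint32_t
toy_cache_alloc_space(struct toy_cache *c, uint32_t size)
{
   uint32_t offset;

   /* Double the backing buffer until the item fits.  Assumes buf_size
    * started nonzero, as brw_init_caches() sizes the initial BO. */
   if (c->next_offset + size > c->buf_size) {
      uint32_t new_size = c->buf_size * 2;
      while (c->next_offset + size > new_size)
         new_size *= 2;
      toy_cache_new_buf(c, new_size);
   }

   offset = c->next_offset;
   /* Keep the next allocation 64-byte aligned, like ALIGN(..., 64). */
   c->next_offset = (offset + size + 63) & ~63u;
   return offset;
}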
262 brw_upload_cache(struct brw_cache *cache,
285 /* If we can find a matching prog/prog_data combo in the cache
292 if (!brw_try_upload_using_copy(cache, item, data, aux)) {
293 brw_upload_item_data(cache, item, data);
304 if (cache->n_items > cache->size * 1.5)
305 rehash(cache);
307 hash %= cache->size;
308 item->next = cache->items[hash];
309 cache->items[hash] = item;
310 cache->n_items++;
313 drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
317 cache->brw->state.dirty.cache |= 1 << cache_id;
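Putting those pieces together, an insert in the toy model follows the same order as brw_upload_cache() above: try to reuse an identical copy, otherwise allocate space and write the bytes; rehash once the table is more than 1.5x loaded; then link the new item at the head of its bucket:

static void
toy_cache_insert(struct toy_cache *c, uint32_t hash,
                 const void *data, uint32_t size)
{
   struct toy_item *item = calloc(1, sizeof(*item));

   item->hash = hash;
   item->size = size;

   /* Deduplicate against existing items before spending BO space. */
   if (!toy_cache_find_copy(c, data, size, &item->offset)) {
      item->offset = toy_cache_alloc_space(c, size);
      memcpy((char *)c->buf + item->offset, data, size);
   }

   if (c->n_items > c->size * 1.5)
      toy_cache_rehash(c);

   item->next = c->buckets[hash % c->size];
   c->buckets[hash % c->size] = item;
   c->n_items++;
}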
324 struct brw_cache *cache = &brw->cache;
326 cache->brw = brw;
328 cache->size = 7;
329 cache->n_items = 0;
330 cache->items = (struct brw_cache_item **)
331 calloc(cache->size, sizeof(struct brw_cache_item *));
333 cache->bo = drm_intel_bo_alloc(intel->bufmgr,
334 "program cache",
339 brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
347 for (i = 0; i < cache->size; i++) {
348 for (c = cache->items[i]; c; c = next) {
353 cache->items[i] = NULL;
356 cache->n_items = 0;
361 cache->next_offset = 0;
368 brw->state.dirty.cache |= ~0;
376 * state cache.
378 if (brw->cache.n_items > 2000) {
379 perf_debug("Exceeded state cache size limit. Clearing the set "
381 brw_clear_cache(brw, &brw->cache);
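That size check is the entire replacement policy: once more than 2000 items accumulate, everything is thrown out and regenerated, exactly as the header comment promised. In the toy model the guard is a one-liner (TOY_CACHE_MAX_ITEMS is an illustrative name):

#define TOY_CACHE_MAX_ITEMS 2000  /* mirrors the limit in the excerpt */

static void
toy_cache_check_size(struct toy_cache *c)
{
   if (c->n_items > TOY_CACHE_MAX_ITEMS)
      toy_cache_clear(c);
}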
387 brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
392 drm_intel_bo_unreference(cache->bo);
393 cache->bo = NULL;
394 brw_clear_cache(brw, cache);
395 free(cache->items);
396 cache->items = NULL;
397 cache->size = 0;
404 brw_destroy_cache(brw, &brw->cache);