Lines Matching defs:cache

47 /* This code implements a small and *simple* DNS resolver cache.
49 * It is only used to cache DNS answers for a time defined by the smallest TTL
51 * to be a full DNS cache, since we plan to implement that in the future in a
63 * to keep an answer in the cache.
66 * (and should be solved by the later full DNS cache process).
72 * that a full DNS cache is expected to do.
76 * - the client calls _resolv_cache_get() to obtain a handle to the cache.
77 * this will initialize the cache on first usage. the result can be NULL
78 * if the cache is disabled.
87 * answer to the cache.
95 * - when network settings change, the cache must be flushed since the list
105 * that the cache is only flushed once per network change.
109 * this code is called if its value is "0", then the resolver cache is
118 /* default number of entries kept in the cache. This value has been
146 * * 2) we've made this a system-wide cache, so the cost is less (it's not
154 /* name of the system property that can be used to set the cache size */
165 /* set to 1 to debug cache operations */
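
The header comments above outline the caller-side contract: get a cache handle (possibly NULL when the cache is disabled), try a lookup, and on a miss query the server and add the answer back. Below is a hedged, self-contained sketch of that flow; the enum values, stub functions, and signatures are placeholders modeled on names appearing in this listing (_resolv_cache_lookup, _resolv_cache_add, _resolv_cache_query_failed), not the source's actual API.

#include <stddef.h>

typedef enum {
    CACHE_UNSUPPORTED,   /* query cannot be cached: go straight to the server */
    CACHE_NOTFOUND,      /* no cached answer: query the server, then add the answer */
    CACHE_FOUND          /* a cached answer was copied into the caller's buffer */
} CacheStatus;

struct resolv_cache;     /* opaque handle, as returned by the real _resolv_cache_get() */

/* stand-ins for the real cache and transport calls, so the sketch is self-contained */
static CacheStatus cache_lookup(struct resolv_cache* c, const void* q, int qlen,
                                void* ans, int anssize, int* anslen)
{ (void)c; (void)q; (void)qlen; (void)ans; (void)anssize; *anslen = 0; return CACHE_NOTFOUND; }

static void cache_add(struct resolv_cache* c, const void* q, int qlen,
                      const void* ans, int anslen)
{ (void)c; (void)q; (void)qlen; (void)ans; (void)anslen; }

static void cache_query_failed(struct resolv_cache* c, const void* q, int qlen)
{ (void)c; (void)q; (void)qlen; }

static int send_to_server(const void* q, int qlen, void* ans, int anssize)
{ (void)q; (void)qlen; (void)ans; (void)anssize; return -1; }

/* One name lookup going through the cache first. Returns the answer length, or -1. */
static int
resolve_with_cache(struct resolv_cache* cache,
                   const void* query, int querylen,
                   void* answer, int answersize)
{
    int answerlen = 0;

    if (cache != NULL) {                 /* a NULL handle means the cache is disabled */
        switch (cache_lookup(cache, query, querylen, answer, answersize, &answerlen)) {
        case CACHE_FOUND:
            return answerlen;            /* served from the cache */
        case CACHE_UNSUPPORTED:
            return send_to_server(query, querylen, answer, answersize);
        case CACHE_NOTFOUND:
            break;                       /* fall through to a real query */
        }
    }

    answerlen = send_to_server(query, querylen, answer, answersize);
    if (cache != NULL) {
        if (answerlen > 0)
            cache_add(cache, query, querylen, answer, answerlen);
        else
            cache_query_failed(cache, query, querylen);  /* wake threads waiting on this key */
    }
    return answerlen;
}
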
417 * TTL : 32 : seconds to cache this RR (0=none)
600 * type of query we can cache, or 0 otherwise
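
The header comment says a cached answer only lives for the smallest TTL among its records, with 0 meaning "do not cache". A minimal sketch of that rule, assuming the TTLs have already been pulled out of the answer (the source parses them directly from the packet):

#include <stdint.h>

/* Return the effective cache lifetime of an answer: the smallest TTL among its
 * resource records. Returns 0 (do not cache) if there are no records or if any
 * record carries a TTL of 0. */
static uint32_t
answer_min_ttl(const uint32_t* rr_ttls, int rr_count)
{
    uint32_t ttl = 0;
    for (int i = 0; i < rr_count; i++) {
        if (i == 0 || rr_ttls[i] < ttl)
            ttl = rr_ttls[i];
    }
    return ttl;
}
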
1003 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
1027 * keep the answer in the cache.
1120 /* allocate a new entry as a cache node */
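
A plausible shape for the cache entry, inferred from the fields this listing references (hash, hlink, the mru links, id, and an expiry derived from the answer TTL); the actual layout in the source may differ:

#include <stdint.h>
#include <time.h>

typedef struct Entry {
    unsigned int    hash;       /* hash of the query, selects the bucket in cache->entries */
    struct Entry*   hlink;      /* next entry in the same hash bucket (inlined chaining) */
    struct Entry*   mru_prev;   /* intrusive most-recently-used list links */
    struct Entry*   mru_next;
    const uint8_t*  query;      /* cached DNS query packet */
    int             querylen;
    const uint8_t*  answer;     /* cached DNS answer packet */
    int             answerlen;
    time_t          expires;    /* absolute expiry, derived from the smallest answer TTL */
    int             id;         /* debug id, assigned from cache->last_id */
} Entry;
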
1196 Cache* cache;
1205 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1208 if (cache) {
1209 ri = cache->pending_requests.next;
1220 cache->pending_requests.next = NULL;
1228 _cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
1233 if (cache && key) {
1234 ri = cache->pending_requests.next;
1235 prev = &cache->pending_requests;
1255 int rv = pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
1263 * matching the key has been added to the cache */
1265 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1269 if (cache && key) {
1270 ri = cache->pending_requests.next;
1271 prev = &cache->pending_requests;
1290 /* notify the cache that the query failed */
1292 _resolv_cache_query_failed( struct resolv_cache* cache,
1298 if (cache && entry_init_key(key, query, querylen)) {
1299 pthread_mutex_lock(&cache->lock);
1300 _cache_notify_waiting_tid_locked(cache, key);
1301 pthread_mutex_unlock(&cache->lock);
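
The pending-request fragments above implement request coalescing: a thread that misses the cache while another thread is already resolving the same key blocks on a condition variable until the answer is added, the query fails, or a timeout expires. A minimal sketch of that wait/notify pattern follows; the struct, helper names, and the 20-second timeout are assumptions, not the source's.

#include <pthread.h>
#include <time.h>

#define PENDING_REQUEST_TIMEOUT 20   /* seconds; assumed value */

struct pending_req {
    pthread_cond_t cond;      /* must be initialized with pthread_cond_init() */
    int            answered;  /* set under the cache lock when the answer arrives or fails */
};

/* Called with the cache lock held: block until the request is answered or the deadline passes. */
static void
wait_for_pending(struct pending_req* req, pthread_mutex_t* lock)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);   /* pthread_cond_timedwait takes an absolute deadline */
    ts.tv_sec += PENDING_REQUEST_TIMEOUT;

    while (!req->answered) {
        if (pthread_cond_timedwait(&req->cond, lock, &ts) != 0)
            break;                        /* timed out (or error): give up and re-check the cache */
    }
}

/* Called with the cache lock held by the thread that added the answer or saw the query fail. */
static void
notify_pending(struct pending_req* req)
{
    req->answered = 1;
    pthread_cond_broadcast(&req->cond);
}
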
1306 _cache_flush_locked( Cache* cache )
1311 for (nn = 0; nn < cache->max_entries; nn++)
1313 Entry** pnode = (Entry**) &cache->entries[nn];
1323 _cache_flush_pending_requests_locked(cache);
1325 cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1326 cache->num_entries = 0;
1327 cache->last_id = 0;
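
The flush fragments above walk every hash bucket, free each chained node, and then reset the MRU sentinel and counters. A sketch of the per-bucket part, reusing the Entry layout sketched earlier; entry_free() is an assumed helper that releases one node's storage:

/* Free every entry chained through 'hlink' in one hash bucket. */
static void
flush_bucket(Entry** pnode)
{
    while (*pnode != NULL) {
        Entry* node = *pnode;
        *pnode = node->hlink;   /* unlink the head of the chain */
        entry_free(node);       /* assumed helper: frees the node and its query/answer buffers */
    }
}
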
1334 /* Return max number of entries allowed in the cache,
1335 * i.e. cache size. The cache size is either defined
1348 // Don't use the cache in local mode. This is used by the
1351 XLOG("setup cache for non-cache process. size=1");
1364 XLOG("cache size: %d", result);
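
The fragments above size the cache from a system property, falling back to a compiled-in default (and to a size of 1 in the non-cache process case). A portable sketch of the same idea, with getenv() standing in for the Android property API and placeholder names and values throughout:

#include <stdlib.h>

#define CONFIG_MAX_ENTRIES 64   /* assumed default; the real default lives in the source */

/* Return the number of entries the cache may hold, from an (assumed) override knob. */
static int
get_max_entries(void)
{
    const char* value = getenv("DNS_CACHE_SIZE");   /* placeholder for the system property */
    if (value != NULL) {
        int size = atoi(value);
        if (size > 0)
            return size;        /* accept any positive override */
    }
    return CONFIG_MAX_ENTRIES;  /* otherwise use the built-in default */
}
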
1371 struct resolv_cache* cache;
1373 cache = calloc(sizeof(*cache), 1);
1374 if (cache) {
1375 cache->max_entries = _res_cache_get_max_entries();
1376 cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1377 if (cache->entries) {
1378 cache->generation = ~0U;
1379 pthread_mutex_init( &cache->lock, NULL );
1380 cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1381 XLOG("%s: cache created\n", __FUNCTION__);
1383 free(cache);
1384 cache = NULL;
1387 return cache;
1404 _cache_dump_mru( Cache* cache )
1409 p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1410 for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1473 _cache_lookup_p( Cache* cache,
1476 int index = key->hash % cache->max_entries;
1477 Entry** pnode = (Entry**) &cache->entries[ index ];
1499 _cache_add_p( Cache* cache,
1504 e->id = ++cache->last_id;
1505 entry_mru_add(e, &cache->mru_list);
1506 cache->num_entries += 1;
1509 e->id, cache->num_entries);
1517 _cache_remove_p( Cache* cache,
1523 e->id, cache->num_entries-1);
1528 cache->num_entries -= 1;
1534 _cache_remove_oldest( Cache* cache )
1536 Entry* oldest = cache->mru_list.mru_prev;
1537 Entry** lookup = _cache_lookup_p(cache, oldest);
1547 _cache_remove_p(cache, lookup);
1552 static void _cache_remove_expired(Cache* cache) {
1556 for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
1559 Entry** lookup = _cache_lookup_p(cache, e);
1565 _cache_remove_p(cache, lookup);
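
_cache_lookup_p, _cache_add_p, and the removal helpers above combine an array of hash buckets (chained through hlink) with an intrusive MRU list: mru_list is a sentinel whose links point to itself when empty (as the flush and create fragments show), new or freshly-hit entries sit right after the sentinel, and the eviction victim is the sentinel's mru_prev. A minimal sketch of what the list helpers named in this listing likely do, reusing the Entry layout sketched earlier:

/* Unlink an entry from the MRU list. */
static void
entry_mru_remove(Entry* e)
{
    e->mru_prev->mru_next = e->mru_next;
    e->mru_next->mru_prev = e->mru_prev;
}

/* Insert an entry right after the sentinel, making it the most recently used. */
static void
entry_mru_add(Entry* e, Entry* list)
{
    Entry* first = list->mru_next;

    e->mru_next = first;
    e->mru_prev = list;
    first->mru_prev = e;
    list->mru_next = e;
}
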
1573 _resolv_cache_lookup( struct resolv_cache* cache,
1592 /* we don't cache malformed queries */
1597 /* lookup cache */
1598 pthread_mutex_lock( &cache->lock );
1603 lookup = _cache_lookup_p(cache, key);
1610 if (!_cache_check_pending_request_locked(cache, key)) {
1613 lookup = _cache_lookup_p(cache, key);
1627 _cache_remove_p(cache, lookup);
1642 if (e != cache->mru_list.mru_next) {
1644 entry_mru_add( e, &cache->mru_list );
1651 pthread_mutex_unlock( &cache->lock );
1657 _resolv_cache_add( struct resolv_cache* cache,
1675 pthread_mutex_lock( &cache->lock );
1685 lookup = _cache_lookup_p(cache, key);
1694 if (cache->num_entries >= cache->max_entries) {
1695 _cache_remove_expired(cache);
1696 if (cache->num_entries >= cache->max_entries) {
1697 _cache_remove_oldest(cache);
1700 lookup = _cache_lookup_p(cache, key);
1714 _cache_add_p(cache, lookup, e);
1718 _cache_dump_mru(cache);
1721 _cache_notify_waiting_tid_locked(cache, key);
1722 pthread_mutex_unlock( &cache->lock );
1751 /* gets cache associated with an interface name, or NULL if none exists */
1757 /* looks up the named cache, and creates one if needed */
1759 /* empty the named cache */
1761 /* empty the nameservers set for the named cache */
1778 /* the cache is disabled */
1790 struct resolv_cache *cache;
1798 // if default interface not set then use the first cache
1811 cache = _get_res_cache_for_iface_locked(ifname);
1815 return cache;
1824 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1825 if (!cache) {
1828 cache = _resolv_cache_create();
1829 if (cache) {
1831 cache_info->cache = cache;
1841 return cache;
1853 // if default interface not set then use the first cache
1856 // method will be deleted/obsolete when cache per interface
1869 struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);
1871 if (cache != NULL) {
1872 pthread_mutex_lock( &cache->lock );
1873 if (cache->generation != generation) {
1874 _cache_flush_locked(cache);
1875 cache->generation = generation;
1877 pthread_mutex_unlock( &cache->lock );
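
The fragments above realize the "flush once per network change" rule from the header comment: a generation number is bumped when the network configuration changes, and a cache whose stored generation differs is flushed exactly once and then stamped with the new value (a fresh cache is created with generation ~0U, as shown earlier, so it will normally be flushed on its first check). A self-contained sketch of that pattern, with illustrative names:

#include <pthread.h>

struct gen_cache {
    pthread_mutex_t lock;
    unsigned        generation;   /* ~0U on a fresh cache */
};

static void
flush_if_stale(struct gen_cache* c, unsigned current_generation,
               void (*flush_locked)(struct gen_cache*))
{
    pthread_mutex_lock(&c->lock);
    if (c->generation != current_generation) {
        flush_locked(c);                      /* empty buckets, MRU list, and counters */
        c->generation = current_generation;   /* remember, so repeated calls are no-ops */
    }
    pthread_mutex_unlock(&c->lock);
}
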
1911 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1912 if (cache) {
1913 pthread_mutex_lock(&cache->lock);
1914 _cache_flush_locked(cache);
1915 pthread_mutex_unlock(&cache->lock);
1944 if (info != NULL) return info->cache;
2001 // creates the cache if not created