1d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org/*
2d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * Copyright (C) 2009 The Guava Authors
3d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org *
4d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * Licensed under the Apache License, Version 2.0 (the "License");
5d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * you may not use this file except in compliance with the License.
6d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * You may obtain a copy of the License at
7d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org *
8d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * http://www.apache.org/licenses/LICENSE-2.0
9d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org *
10d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * Unless required by applicable law or agreed to in writing, software
11d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * distributed under the License is distributed on an "AS IS" BASIS,
12d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * See the License for the specific language governing permissions and
14d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org * limitations under the License.
15d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org */
16d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org
17d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgpackage com.google.common.cache;
18d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org
19d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static com.google.common.base.Preconditions.checkNotNull;
20d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static com.google.common.base.Preconditions.checkState;
21d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static com.google.common.cache.CacheBuilder.NULL_TICKER;
22d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static com.google.common.cache.CacheBuilder.UNSET_INT;
23d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
24d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport static java.util.concurrent.TimeUnit.NANOSECONDS;
25d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org
26d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.annotations.VisibleForTesting;
27d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.base.Equivalence;
28d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.base.Equivalences;
29d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.base.Stopwatch;
30d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.base.Ticker;
31d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.AbstractCache.SimpleStatsCounter;
32d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.AbstractCache.StatsCounter;
33d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.CacheBuilder.NullListener;
34d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.CacheBuilder.OneWeigher;
35d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.CacheLoader.InvalidCacheLoadException;
36d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.cache.CacheLoader.UnsupportedLoadingOperationException;
37d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.collect.AbstractLinkedIterator;
38d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.collect.ImmutableMap;
39d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.collect.Iterators;
40d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.collect.Maps;
41d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.collect.Sets;
42d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.primitives.Ints;
43d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.ExecutionError;
44d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.Futures;
45d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.ListenableFuture;
46d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.ListeningExecutorService;
47d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.MoreExecutors;
48d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.SettableFuture;
49d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport com.google.common.util.concurrent.UncheckedExecutionException;
50d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.org
51d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.io.IOException;
52d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.io.ObjectInputStream;
53d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.io.Serializable;
54d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.lang.ref.Reference;
55d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.lang.ref.ReferenceQueue;
56d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.lang.ref.SoftReference;
57d3c42109e5b85232d19beab8deeb24bdcbbf07f9danno@chromium.orgimport java.lang.ref.WeakReference;
58f9841897146bc10dbb3c45f0632bb79254602c75machenbach@chromium.orgimport java.util.AbstractCollection;
59import java.util.AbstractMap;
60import java.util.AbstractQueue;
61import java.util.AbstractSet;
62import java.util.Collection;
63import java.util.Iterator;
64import java.util.Map;
65import java.util.NoSuchElementException;
66import java.util.Queue;
67import java.util.Set;
68import java.util.concurrent.Callable;
69import java.util.concurrent.ConcurrentLinkedQueue;
70import java.util.concurrent.ConcurrentMap;
71import java.util.concurrent.ExecutionException;
72import java.util.concurrent.TimeUnit;
73import java.util.concurrent.atomic.AtomicInteger;
74import java.util.concurrent.atomic.AtomicReferenceArray;
75import java.util.concurrent.locks.ReentrantLock;
76import java.util.logging.Level;
77import java.util.logging.Logger;
78
79import javax.annotation.Nullable;
80import javax.annotation.concurrent.GuardedBy;
81
82/**
83 * The concurrent hash map implementation built by {@link CacheBuilder}.
84 *
85 * <p>This implementation is heavily derived from revision 1.96 of <a
86 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
87 *
88 * @author Charles Fry
89 * @author Bob Lee ({@code com.google.common.collect.MapMaker})
90 * @author Doug Lea ({@code ConcurrentHashMap})
91 */
92class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
93
94  /*
95   * The basic strategy is to subdivide the table among Segments, each of which itself is a
96   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
97   * across different segments.
98   *
99   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
100   * page-replacement algorithm to determine which entries to evict when the capacity has been
101   * exceeded.
102   *
103   * The page replacement algorithm's data structures are kept casually consistent with the map. The
104   * ordering of writes to a segment is sequentially consistent. An update to the map and recording
105   * of reads may not be immediately reflected on the algorithm's data structures. These structures
106   * are guarded by a lock and operations are applied in batches to avoid lock contention. The
107   * penalty of applying the batches is spread across threads so that the amortized cost is slightly
108   * higher than performing just the operation without enforcing the capacity constraint.
109   *
110   * This implementation uses a per-segment queue to record a memento of the additions, removals,
111   * and accesses that were performed on the map. The queue is drained on writes and when it exceeds
112   * its capacity threshold.
113   *
114   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
115   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
116   * operates per-segment rather than globally for increased implementation simplicity. We expect
117   * the cache hit rate to be similar to that of a global LRU algorithm.
118   */
119
120  // Constants
121
122  /**
123   * The maximum capacity, used if a higher value is implicitly specified by either of the
124   * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
125   * indexable using ints.
126   */
127  static final int MAXIMUM_CAPACITY = 1 << 30;
128
129  /** The maximum number of segments to allow; used to bound constructor arguments. */
130  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
131
132  /** Number of (unsynchronized) retries in the containsValue method. */
133  static final int CONTAINS_VALUE_RETRIES = 3;
134
135  /**
136   * Number of cache access operations that can be buffered per segment before the cache's recency
137   * ordering information is updated. This is used to avoid lock contention by recording a memento
138   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
139   *
140   * <p>This must be a (2^n)-1 as it is used as a mask.
141   */
142  static final int DRAIN_THRESHOLD = 0x3F;
143
144  /**
145   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
146   * the cleanup queue and both reference queues.
147   */
148  // TODO(fry): empirically optimize this
149  static final int DRAIN_MAX = 16;
150
151  // Fields
152
153  static final Logger logger = Logger.getLogger(LocalCache.class.getName());
154
155  static final ListeningExecutorService sameThreadExecutor = MoreExecutors.sameThreadExecutor();
156
157  /**
158   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
159   * the segment.
160   */
161  final int segmentMask;
162
163  /**
164   * Shift value for indexing within segments. Helps prevent entries that end up in the same segment
165   * from also ending up in the same bucket.
166   */
167  final int segmentShift;
168
169  /** The segments, each of which is a specialized hash table. */
170  final Segment<K, V>[] segments;
171
172  /** The concurrency level. */
173  final int concurrencyLevel;
174
175  /** Strategy for comparing keys. */
176  final Equivalence<Object> keyEquivalence;
177
178  /** Strategy for comparing values. */
179  final Equivalence<Object> valueEquivalence;
180
181  /** Strategy for referencing keys. */
182  final Strength keyStrength;
183
184  /** Strategy for referencing values. */
185  final Strength valueStrength;
186
187  /** The maximum weight of this map. UNSET_INT if there is no maximum. */
188  final long maxWeight;
189
190  /** Weigher to weigh cache entries. */
191  final Weigher<K, V> weigher;
192
193  /** How long after the last access to an entry the map will retain that entry. */
194  final long expireAfterAccessNanos;
195
196  /** How long after the last write to an entry the map will retain that entry. */
197  final long expireAfterWriteNanos;
198
199  /** How long after the last write an entry becomes a candidate for refresh. */
200  final long refreshNanos;
201
202  /** Entries waiting to be consumed by the removal listener. */
203  // TODO(fry): define a new type which creates event objects and automates the clear logic
204  final Queue<RemovalNotification<K, V>> removalNotificationQueue;
205
206  /**
207   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
208   * soft/weak entries.
209   */
210  final RemovalListener<K, V> removalListener;
211
212  /** Measures time in a testable way. */
213  final Ticker ticker;
214
215  /** Factory used to create new entries. */
216  final EntryFactory entryFactory;
217
218  /**
219   * Accumulates global cache statistics. Note that there are also per-segments stats counters
220   * which must be aggregated to obtain a global stats view.
221   */
222  final StatsCounter globalStatsCounter;
223
224  /**
225   * The default cache loader to use on loading operations.
226   */
227  @Nullable
228  final CacheLoader<? super K, V> defaultLoader;
229
230  /**
231   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
232   */
233  LocalCache(CacheBuilder<? super K, ? super V> builder, CacheLoader<? super K, V> loader) {
234    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);
235
236    keyStrength = builder.getKeyStrength();
237    valueStrength = builder.getValueStrength();
238
239    keyEquivalence = builder.getKeyEquivalence();
240    valueEquivalence = builder.getValueEquivalence();
241
242    maxWeight = builder.getMaximumWeight();
243    weigher = builder.getWeigher();
244    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
245    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();
246    refreshNanos = builder.getRefreshNanos();
247
248    removalListener = builder.getRemovalListener();
249    removalNotificationQueue = (removalListener == NullListener.INSTANCE)
250        ? LocalCache.<RemovalNotification<K, V>>discardingQueue()
251        : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();
252
253    ticker = builder.getTicker(recordsTime());
254    entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
255    globalStatsCounter = builder.getStatsCounterSupplier().get();
256    defaultLoader = loader;
257
258    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
259    if (evictsBySize() && !customWeigher()) {
260      initialCapacity = Math.min(initialCapacity, (int) maxWeight);
261    }
262
263    // Find power-of-two sizes best matching arguments. Constraints:
264    // (segmentCount <= maxWeight)
265    // && (concurrencyLevel > maxWeight || segmentCount > concurrencyLevel)
266    int segmentShift = 0;
267    int segmentCount = 1;
268    while (segmentCount < concurrencyLevel
269        && (!evictsBySize() || customWeigher() || segmentCount * 2 <= maxWeight)) {
270      ++segmentShift;
271      segmentCount <<= 1;
272    }
273    this.segmentShift = 32 - segmentShift;
274    segmentMask = segmentCount - 1;
275
276    this.segments = newSegmentArray(segmentCount);
277
278    int segmentCapacity = initialCapacity / segmentCount;
279    if (segmentCapacity * segmentCount < initialCapacity) {
280      ++segmentCapacity;
281    }
282
283    int segmentSize = 1;
284    while (segmentSize < segmentCapacity) {
285      segmentSize <<= 1;
286    }
287
288    if (evictsBySize()) {
289      // Ensure sum of segment max weights = overall max weights
290      long maxSegmentWeight = maxWeight / segmentCount + 1;
291      long remainder = maxWeight % segmentCount;
292      for (int i = 0; i < this.segments.length; ++i) {
293        if (i == remainder) {
294          maxSegmentWeight--;
295        }
296        this.segments[i] =
297            createSegment(segmentSize, maxSegmentWeight, builder.getStatsCounterSupplier().get());
298      }
299    } else {
300      for (int i = 0; i < this.segments.length; ++i) {
301        this.segments[i] =
302            createSegment(segmentSize, UNSET_INT, builder.getStatsCounterSupplier().get());
303      }
304    }
305  }
306
307  boolean evictsBySize() {
308    return maxWeight >= 0;
309  }
310
311  boolean customWeigher() {
312    return weigher != OneWeigher.INSTANCE;
313  }
314
315  boolean expires() {
316    return expiresAfterWrite() || expiresAfterAccess();
317  }
318
319  boolean expiresAfterWrite() {
320    return expireAfterWriteNanos > 0;
321  }
322
323  boolean expiresAfterAccess() {
324    return expireAfterAccessNanos > 0;
325  }
326
327  boolean refreshes() {
328    return refreshNanos > 0;
329  }
330
331  boolean usesAccessQueue() {
332    return expiresAfterAccess() || evictsBySize();
333  }
334
335  boolean usesWriteQueue() {
336    return expiresAfterWrite();
337  }
338
339  boolean recordsWrite() {
340    return expiresAfterWrite() || refreshes();
341  }
342
343  boolean recordsAccess() {
344    return expiresAfterAccess();
345  }
346
347  boolean recordsTime() {
348    return recordsWrite() || recordsAccess();
349  }
350
351  boolean usesWriteEntries() {
352    return usesWriteQueue() || recordsWrite();
353  }
354
355  boolean usesAccessEntries() {
356    return usesAccessQueue() || recordsAccess();
357  }
358
359  boolean usesKeyReferences() {
360    return keyStrength != Strength.STRONG;
361  }
362
363  boolean usesValueReferences() {
364    return valueStrength != Strength.STRONG;
365  }
366
367  enum Strength {
368    /*
369     * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
370     * value. This could save ~8 bytes per entry.
371     */
372
373    STRONG {
374      @Override
375      <K, V> ValueReference<K, V> referenceValue(
376          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
377        return (weight == 1)
378            ? new StrongValueReference<K, V>(value)
379            : new WeightedStrongValueReference<K, V>(value, weight);
380      }
381
382      @Override
383      Equivalence<Object> defaultEquivalence() {
384        return Equivalences.equals();
385      }
386    },
387
388    SOFT {
389      @Override
390      <K, V> ValueReference<K, V> referenceValue(
391          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
392        return (weight == 1)
393            ? new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry)
394            : new WeightedSoftValueReference<K, V>(
395                segment.valueReferenceQueue, value, entry, weight);
396      }
397
398      @Override
399      Equivalence<Object> defaultEquivalence() {
400        return Equivalences.identity();
401      }
402    },
403
404    WEAK {
405      @Override
406      <K, V> ValueReference<K, V> referenceValue(
407          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
408        return (weight == 1)
409            ? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry)
410            : new WeightedWeakValueReference<K, V>(
411                segment.valueReferenceQueue, value, entry, weight);
412      }
413
414      @Override
415      Equivalence<Object> defaultEquivalence() {
416        return Equivalences.identity();
417      }
418    };
419
420    /**
421     * Creates a reference for the given value according to this value strength.
422     */
423    abstract <K, V> ValueReference<K, V> referenceValue(
424        Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight);
425
426    /**
427     * Returns the default equivalence strategy used to compare and hash keys or values referenced
428     * at this strength. This strategy will be used unless the user explicitly specifies an
429     * alternate strategy.
430     */
431    abstract Equivalence<Object> defaultEquivalence();
432  }
433
434  /**
435   * Creates new entries.
436   */
437  enum EntryFactory {
438    STRONG {
439      @Override
440      <K, V> ReferenceEntry<K, V> newEntry(
441          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
442        return new StrongEntry<K, V>(key, hash, next);
443      }
444    },
445    STRONG_ACCESS {
446      @Override
447      <K, V> ReferenceEntry<K, V> newEntry(
448          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
449        return new StrongAccessEntry<K, V>(key, hash, next);
450      }
451
452      @Override
453      <K, V> ReferenceEntry<K, V> copyEntry(
454          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
455        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
456        copyAccessEntry(original, newEntry);
457        return newEntry;
458      }
459    },
460    STRONG_WRITE {
461      @Override
462      <K, V> ReferenceEntry<K, V> newEntry(
463          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
464        return new StrongWriteEntry<K, V>(key, hash, next);
465      }
466
467      @Override
468      <K, V> ReferenceEntry<K, V> copyEntry(
469          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
470        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
471        copyWriteEntry(original, newEntry);
472        return newEntry;
473      }
474    },
475    STRONG_ACCESS_WRITE {
476      @Override
477      <K, V> ReferenceEntry<K, V> newEntry(
478          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
479        return new StrongAccessWriteEntry<K, V>(key, hash, next);
480      }
481
482      @Override
483      <K, V> ReferenceEntry<K, V> copyEntry(
484          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
485        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
486        copyAccessEntry(original, newEntry);
487        copyWriteEntry(original, newEntry);
488        return newEntry;
489      }
490    },
491
492    WEAK {
493      @Override
494      <K, V> ReferenceEntry<K, V> newEntry(
495          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
496        return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
497      }
498    },
499    WEAK_ACCESS {
500      @Override
501      <K, V> ReferenceEntry<K, V> newEntry(
502          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
503        return new WeakAccessEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
504      }
505
506      @Override
507      <K, V> ReferenceEntry<K, V> copyEntry(
508          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
509        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
510        copyAccessEntry(original, newEntry);
511        return newEntry;
512      }
513    },
514    WEAK_WRITE {
515      @Override
516      <K, V> ReferenceEntry<K, V> newEntry(
517          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
518        return new WeakWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
519      }
520
521      @Override
522      <K, V> ReferenceEntry<K, V> copyEntry(
523          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
524        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
525        copyWriteEntry(original, newEntry);
526        return newEntry;
527      }
528    },
529    WEAK_ACCESS_WRITE {
530      @Override
531      <K, V> ReferenceEntry<K, V> newEntry(
532          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
533        return new WeakAccessWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
534      }
535
536      @Override
537      <K, V> ReferenceEntry<K, V> copyEntry(
538          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
539        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
540        copyAccessEntry(original, newEntry);
541        copyWriteEntry(original, newEntry);
542        return newEntry;
543      }
544    };
545
546    /**
547     * Masks used to compute indices in the following table.
548     */
549    static final int ACCESS_MASK = 1;
550    static final int WRITE_MASK = 2;
551    static final int WEAK_MASK = 4;
552
553    /**
554     * Look-up table for factories.
555     */
556    static final EntryFactory[] factories = {
557      STRONG, STRONG_ACCESS, STRONG_WRITE, STRONG_ACCESS_WRITE,
558      WEAK, WEAK_ACCESS, WEAK_WRITE, WEAK_ACCESS_WRITE,
559    };
560
561    static EntryFactory getFactory(Strength keyStrength, boolean usesAccessQueue,
562        boolean usesWriteQueue) {
563      int flags = ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0)
564          | (usesAccessQueue ? ACCESS_MASK : 0)
565          | (usesWriteQueue ? WRITE_MASK : 0);
566      return factories[flags];
567    }
568
569    /**
570     * Creates a new entry.
571     *
572     * @param segment to create the entry for
573     * @param key of the entry
574     * @param hash of the key
575     * @param next entry in the same bucket
576     */
577    abstract <K, V> ReferenceEntry<K, V> newEntry(
578        Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next);
579
580    /**
581     * Copies an entry, assigning it a new {@code next} entry.
582     *
583     * @param original the entry to copy
584     * @param newNext entry in the same bucket
585     */
586    @GuardedBy("Segment.this")
587    <K, V> ReferenceEntry<K, V> copyEntry(
588        Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
589      return newEntry(segment, original.getKey(), original.getHash(), newNext);
590    }
591
592    @GuardedBy("Segment.this")
593    <K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
594      // TODO(fry): when we link values instead of entries this method can go
595      // away, as can connectAccessOrder, nullifyAccessOrder.
596      newEntry.setAccessTime(original.getAccessTime());
597
598      connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
599      connectAccessOrder(newEntry, original.getNextInAccessQueue());
600
601      nullifyAccessOrder(original);
602    }
603
604    @GuardedBy("Segment.this")
605    <K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
606      // TODO(fry): when we link values instead of entries this method can go
607      // away, as can connectWriteOrder, nullifyWriteOrder.
608      newEntry.setWriteTime(original.getWriteTime());
609
610      connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
611      connectWriteOrder(newEntry, original.getNextInWriteQueue());
612
613      nullifyWriteOrder(original);
614    }
615  }
616
617  /**
618   * A reference to a value.
619   */
620  interface ValueReference<K, V> {
621    /**
622     * Returns the value. Does not block or throw exceptions.
623     */
624    @Nullable
625    V get();
626
627    /**
628     * Waits for a value that may still be loading. Unlike get(), this method can block (in the
629     * case of FutureValueReference).
630     *
631     * @throws ExecutionException if the loading thread throws an exception
632     * @throws ExecutionError if the loading thread throws an error
633     */
634    V waitForValue() throws ExecutionException;
635
636    /**
637     * Returns the weight of this entry. This is assumed to be static between calls to setValue.
638     */
639    int getWeight();
640
641    /**
642     * Returns the entry associated with this value reference, or {@code null} if this value
643     * reference is independent of any entry.
644     */
645    @Nullable
646    ReferenceEntry<K, V> getEntry();
647
648    /**
649     * Creates a copy of this reference for the given entry.
650     */
651    ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry);
652
653    /**
654     * Notifify pending loads that a new value was set. This is only relevant to loading
655     * value references.
656     */
657    void notifyNewValue(@Nullable V newValue);
658
659    /**
660     * Returns true if a new value is currently loading, regardless of whether or not there is an
661     * existing value. It is assumed that the return value of this method is constant for any given
662     * ValueReference instance.
663     */
664    boolean isLoading();
665
666    /**
667     * Returns true if this reference contains an active value, meaning one that is still considered
668     * present in the cache. Active values consist of live values, which are returned by cache
669     * lookups, and dead values, which have been evicted but awaiting removal. Non-active values
670     * consist strictly of loading values, though during refresh a value may be both active and
671     * loading.
672     */
673    boolean isActive();
674  }
675
676  /**
677   * Placeholder. Indicates that the value hasn't been set yet.
678   */
679  static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
680    @Override
681    public Object get() {
682      return null;
683    }
684
685    @Override
686    public int getWeight() {
687      return 0;
688    }
689
690    @Override
691    public ReferenceEntry<Object, Object> getEntry() {
692      return null;
693    }
694
695    @Override
696    public ValueReference<Object, Object> copyFor(
697        ReferenceQueue<Object> queue, ReferenceEntry<Object, Object> entry) {
698      return this;
699    }
700
701    @Override
702    public boolean isLoading() {
703      return false;
704    }
705
706    @Override
707    public boolean isActive() {
708      return false;
709    }
710
711    @Override
712    public Object waitForValue() {
713      return null;
714    }
715
716    @Override
717    public void notifyNewValue(Object newValue) {}
718  };
719
720  /**
721   * Singleton placeholder that indicates a value is being loaded.
722   */
723  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
724  static <K, V> ValueReference<K, V> unset() {
725    return (ValueReference<K, V>) UNSET;
726  }
727
728  /**
729   * An entry in a reference map.
730   *
731   * Entries in the map can be in the following states:
732   *
733   * Valid:
734   * - Live: valid key/value are set
735   * - Loading: loading is pending
736   *
737   * Invalid:
738   * - Expired: time expired (key/value may still be set)
739   * - Collected: key/value was partially collected, but not yet cleaned up
740   * - Unset: marked as unset, awaiting cleanup or reuse
741   */
742  interface ReferenceEntry<K, V> {
743    /**
744     * Returns the value reference from this entry.
745     */
746    ValueReference<K, V> getValueReference();
747
748    /**
749     * Sets the value reference for this entry.
750     */
751    void setValueReference(ValueReference<K, V> valueReference);
752
753    /**
754     * Returns the next entry in the chain.
755     */
756    @Nullable
757    ReferenceEntry<K, V> getNext();
758
759    /**
760     * Returns the entry's hash.
761     */
762    int getHash();
763
764    /**
765     * Returns the key for this entry.
766     */
767    @Nullable
768    K getKey();
769
770    /*
771     * Used by entries that use access order. Access entries are maintained in a doubly-linked list.
772     * New entries are added at the tail of the list at write time; stale entries are expired from
773     * the head of the list.
774     */
775
776    /**
777     * Returns the time that this entry was last accessed, in ns.
778     */
779    long getAccessTime();
780
781    /**
782     * Sets the entry access time in ns.
783     */
784    void setAccessTime(long time);
785
786    /**
787     * Returns the next entry in the access queue.
788     */
789    ReferenceEntry<K, V> getNextInAccessQueue();
790
791    /**
792     * Sets the next entry in the access queue.
793     */
794    void setNextInAccessQueue(ReferenceEntry<K, V> next);
795
796    /**
797     * Returns the previous entry in the access queue.
798     */
799    ReferenceEntry<K, V> getPreviousInAccessQueue();
800
801    /**
802     * Sets the previous entry in the access queue.
803     */
804    void setPreviousInAccessQueue(ReferenceEntry<K, V> previous);
805
806    /*
807     * Implemented by entries that use write order. Write entries are maintained in a
808     * doubly-linked list. New entries are added at the tail of the list at write time and stale
809     * entries are expired from the head of the list.
810     */
811
812    /**
813     * Returns the time that this entry was last written, in ns.
814     */
815    long getWriteTime();
816
817    /**
818     * Sets the entry write time in ns.
819     */
820    void setWriteTime(long time);
821
822    /**
823     * Returns the next entry in the write queue.
824     */
825    ReferenceEntry<K, V> getNextInWriteQueue();
826
827    /**
828     * Sets the next entry in the write queue.
829     */
830    void setNextInWriteQueue(ReferenceEntry<K, V> next);
831
832    /**
833     * Returns the previous entry in the write queue.
834     */
835    ReferenceEntry<K, V> getPreviousInWriteQueue();
836
837    /**
838     * Sets the previous entry in the write queue.
839     */
840    void setPreviousInWriteQueue(ReferenceEntry<K, V> previous);
841  }
842
843  private enum NullEntry implements ReferenceEntry<Object, Object> {
844    INSTANCE;
845
846    @Override
847    public ValueReference<Object, Object> getValueReference() {
848      return null;
849    }
850
851    @Override
852    public void setValueReference(ValueReference<Object, Object> valueReference) {}
853
854    @Override
855    public ReferenceEntry<Object, Object> getNext() {
856      return null;
857    }
858
859    @Override
860    public int getHash() {
861      return 0;
862    }
863
864    @Override
865    public Object getKey() {
866      return null;
867    }
868
869    @Override
870    public long getAccessTime() {
871      return 0;
872    }
873
874    @Override
875    public void setAccessTime(long time) {}
876
877    @Override
878    public ReferenceEntry<Object, Object> getNextInAccessQueue() {
879      return this;
880    }
881
882    @Override
883    public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {}
884
885    @Override
886    public ReferenceEntry<Object, Object> getPreviousInAccessQueue() {
887      return this;
888    }
889
890    @Override
891    public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {}
892
893    @Override
894    public long getWriteTime() {
895      return 0;
896    }
897
898    @Override
899    public void setWriteTime(long time) {}
900
901    @Override
902    public ReferenceEntry<Object, Object> getNextInWriteQueue() {
903      return this;
904    }
905
906    @Override
907    public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {}
908
909    @Override
910    public ReferenceEntry<Object, Object> getPreviousInWriteQueue() {
911      return this;
912    }
913
914    @Override
915    public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {}
916  }
917
918  static abstract class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
919    @Override
920    public ValueReference<K, V> getValueReference() {
921      throw new UnsupportedOperationException();
922    }
923
924    @Override
925    public void setValueReference(ValueReference<K, V> valueReference) {
926      throw new UnsupportedOperationException();
927    }
928
929    @Override
930    public ReferenceEntry<K, V> getNext() {
931      throw new UnsupportedOperationException();
932    }
933
934    @Override
935    public int getHash() {
936      throw new UnsupportedOperationException();
937    }
938
939    @Override
940    public K getKey() {
941      throw new UnsupportedOperationException();
942    }
943
944    @Override
945    public long getAccessTime() {
946      throw new UnsupportedOperationException();
947    }
948
949    @Override
950    public void setAccessTime(long time) {
951      throw new UnsupportedOperationException();
952    }
953
954    @Override
955    public ReferenceEntry<K, V> getNextInAccessQueue() {
956      throw new UnsupportedOperationException();
957    }
958
959    @Override
960    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
961      throw new UnsupportedOperationException();
962    }
963
964    @Override
965    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
966      throw new UnsupportedOperationException();
967    }
968
969    @Override
970    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
971      throw new UnsupportedOperationException();
972    }
973
974    @Override
975    public long getWriteTime() {
976      throw new UnsupportedOperationException();
977    }
978
979    @Override
980    public void setWriteTime(long time) {
981      throw new UnsupportedOperationException();
982    }
983
984    @Override
985    public ReferenceEntry<K, V> getNextInWriteQueue() {
986      throw new UnsupportedOperationException();
987    }
988
989    @Override
990    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
991      throw new UnsupportedOperationException();
992    }
993
994    @Override
995    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
996      throw new UnsupportedOperationException();
997    }
998
999    @Override
1000    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1001      throw new UnsupportedOperationException();
1002    }
1003  }
1004
1005  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1006  static <K, V> ReferenceEntry<K, V> nullEntry() {
1007    return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
1008  }
1009
1010  static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() {
1011    @Override
1012    public boolean offer(Object o) {
1013      return true;
1014    }
1015
1016    @Override
1017    public Object peek() {
1018      return null;
1019    }
1020
1021    @Override
1022    public Object poll() {
1023      return null;
1024    }
1025
1026    @Override
1027    public int size() {
1028      return 0;
1029    }
1030
1031    @Override
1032    public Iterator<Object> iterator() {
1033      return Iterators.emptyIterator();
1034    }
1035  };
1036
1037  /**
1038   * Queue that discards all elements.
1039   */
1040  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1041  static <E> Queue<E> discardingQueue() {
1042    return (Queue) DISCARDING_QUEUE;
1043  }
1044
1045  /*
1046   * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
1047   * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
1048   * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
1049   * strong entries store the key reference directly while soft and weak entries delegate to their
1050   * respective superclasses.
1051   */
1052
1053  /**
1054   * Used for strongly-referenced keys.
1055   */
1056  static class StrongEntry<K, V> implements ReferenceEntry<K, V> {
1057    final K key;
1058
1059    StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1060      this.key = key;
1061      this.hash = hash;
1062      this.next = next;
1063    }
1064
1065    @Override
1066    public K getKey() {
1067      return this.key;
1068    }
1069
1070    // null access
1071
1072    @Override
1073    public long getAccessTime() {
1074      throw new UnsupportedOperationException();
1075    }
1076
1077    @Override
1078    public void setAccessTime(long time) {
1079      throw new UnsupportedOperationException();
1080    }
1081
1082    @Override
1083    public ReferenceEntry<K, V> getNextInAccessQueue() {
1084      throw new UnsupportedOperationException();
1085    }
1086
1087    @Override
1088    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1089      throw new UnsupportedOperationException();
1090    }
1091
1092    @Override
1093    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1094      throw new UnsupportedOperationException();
1095    }
1096
1097    @Override
1098    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1099      throw new UnsupportedOperationException();
1100    }
1101
1102    // null write
1103
1104    @Override
1105    public long getWriteTime() {
1106      throw new UnsupportedOperationException();
1107    }
1108
1109    @Override
1110    public void setWriteTime(long time) {
1111      throw new UnsupportedOperationException();
1112    }
1113
1114    @Override
1115    public ReferenceEntry<K, V> getNextInWriteQueue() {
1116      throw new UnsupportedOperationException();
1117    }
1118
1119    @Override
1120    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1121      throw new UnsupportedOperationException();
1122    }
1123
1124    @Override
1125    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1126      throw new UnsupportedOperationException();
1127    }
1128
1129    @Override
1130    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1131      throw new UnsupportedOperationException();
1132    }
1133
1134    // The code below is exactly the same for each entry type.
1135
1136    final int hash;
1137    final ReferenceEntry<K, V> next;
1138    volatile ValueReference<K, V> valueReference = unset();
1139
1140    @Override
1141    public ValueReference<K, V> getValueReference() {
1142      return valueReference;
1143    }
1144
1145    @Override
1146    public void setValueReference(ValueReference<K, V> valueReference) {
1147      this.valueReference = valueReference;
1148    }
1149
1150    @Override
1151    public int getHash() {
1152      return hash;
1153    }
1154
1155    @Override
1156    public ReferenceEntry<K, V> getNext() {
1157      return next;
1158    }
1159  }
1160
1161  static final class StrongAccessEntry<K, V> extends StrongEntry<K, V>
1162      implements ReferenceEntry<K, V> {
1163    StrongAccessEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1164      super(key, hash, next);
1165    }
1166
1167    // The code below is exactly the same for each access entry type.
1168
1169    volatile long accessTime = Long.MAX_VALUE;
1170
1171    @Override
1172    public long getAccessTime() {
1173      return accessTime;
1174    }
1175
1176    @Override
1177    public void setAccessTime(long time) {
1178      this.accessTime = time;
1179    }
1180
1181    @GuardedBy("Segment.this")
1182    ReferenceEntry<K, V> nextAccess = nullEntry();
1183
1184    @Override
1185    public ReferenceEntry<K, V> getNextInAccessQueue() {
1186      return nextAccess;
1187    }
1188
1189    @Override
1190    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1191      this.nextAccess = next;
1192    }
1193
1194    @GuardedBy("Segment.this")
1195    ReferenceEntry<K, V> previousAccess = nullEntry();
1196
1197    @Override
1198    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1199      return previousAccess;
1200    }
1201
1202    @Override
1203    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1204      this.previousAccess = previous;
1205    }
1206  }
1207
1208  static final class StrongWriteEntry<K, V>
1209      extends StrongEntry<K, V> implements ReferenceEntry<K, V> {
1210    StrongWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1211      super(key, hash, next);
1212    }
1213
1214    // The code below is exactly the same for each write entry type.
1215
1216    volatile long writeTime = Long.MAX_VALUE;
1217
1218    @Override
1219    public long getWriteTime() {
1220      return writeTime;
1221    }
1222
1223    @Override
1224    public void setWriteTime(long time) {
1225      this.writeTime = time;
1226    }
1227
1228    @GuardedBy("Segment.this")
1229    ReferenceEntry<K, V> nextWrite = nullEntry();
1230
1231    @Override
1232    public ReferenceEntry<K, V> getNextInWriteQueue() {
1233      return nextWrite;
1234    }
1235
1236    @Override
1237    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1238      this.nextWrite = next;
1239    }
1240
1241    @GuardedBy("Segment.this")
1242    ReferenceEntry<K, V> previousWrite = nullEntry();
1243
1244    @Override
1245    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1246      return previousWrite;
1247    }
1248
1249    @Override
1250    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1251      this.previousWrite = previous;
1252    }
1253  }
1254
1255  static final class StrongAccessWriteEntry<K, V>
1256      extends StrongEntry<K, V> implements ReferenceEntry<K, V> {
1257    StrongAccessWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1258      super(key, hash, next);
1259    }
1260
1261    // The code below is exactly the same for each access entry type.
1262
1263    volatile long accessTime = Long.MAX_VALUE;
1264
1265    @Override
1266    public long getAccessTime() {
1267      return accessTime;
1268    }
1269
1270    @Override
1271    public void setAccessTime(long time) {
1272      this.accessTime = time;
1273    }
1274
1275    @GuardedBy("Segment.this")
1276    ReferenceEntry<K, V> nextAccess = nullEntry();
1277
1278    @Override
1279    public ReferenceEntry<K, V> getNextInAccessQueue() {
1280      return nextAccess;
1281    }
1282
1283    @Override
1284    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1285      this.nextAccess = next;
1286    }
1287
1288    @GuardedBy("Segment.this")
1289    ReferenceEntry<K, V> previousAccess = nullEntry();
1290
1291    @Override
1292    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1293      return previousAccess;
1294    }
1295
1296    @Override
1297    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1298      this.previousAccess = previous;
1299    }
1300
1301    // The code below is exactly the same for each write entry type.
1302
1303    volatile long writeTime = Long.MAX_VALUE;
1304
1305    @Override
1306    public long getWriteTime() {
1307      return writeTime;
1308    }
1309
1310    @Override
1311    public void setWriteTime(long time) {
1312      this.writeTime = time;
1313    }
1314
1315    @GuardedBy("Segment.this")
1316    ReferenceEntry<K, V> nextWrite = nullEntry();
1317
1318    @Override
1319    public ReferenceEntry<K, V> getNextInWriteQueue() {
1320      return nextWrite;
1321    }
1322
1323    @Override
1324    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1325      this.nextWrite = next;
1326    }
1327
1328    @GuardedBy("Segment.this")
1329    ReferenceEntry<K, V> previousWrite = nullEntry();
1330
1331    @Override
1332    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1333      return previousWrite;
1334    }
1335
1336    @Override
1337    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1338      this.previousWrite = previous;
1339    }
1340  }
1341
1342  /**
1343   * Used for weakly-referenced keys.
1344   */
1345  static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> {
1346    WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1347      super(key, queue);
1348      this.hash = hash;
1349      this.next = next;
1350    }
1351
1352    @Override
1353    public K getKey() {
1354      return get();
1355    }
1356
1357    // null access
1358
1359    @Override
1360    public long getAccessTime() {
1361      throw new UnsupportedOperationException();
1362    }
1363
1364    @Override
1365    public void setAccessTime(long time) {
1366      throw new UnsupportedOperationException();
1367    }
1368
1369    @Override
1370    public ReferenceEntry<K, V> getNextInAccessQueue() {
1371      throw new UnsupportedOperationException();
1372    }
1373
1374    @Override
1375    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1376      throw new UnsupportedOperationException();
1377    }
1378
1379    @Override
1380    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1381      throw new UnsupportedOperationException();
1382    }
1383
1384    @Override
1385    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1386      throw new UnsupportedOperationException();
1387    }
1388
1389    // null write
1390
1391    @Override
1392    public long getWriteTime() {
1393      throw new UnsupportedOperationException();
1394    }
1395
1396    @Override
1397    public void setWriteTime(long time) {
1398      throw new UnsupportedOperationException();
1399    }
1400
1401    @Override
1402    public ReferenceEntry<K, V> getNextInWriteQueue() {
1403      throw new UnsupportedOperationException();
1404    }
1405
1406    @Override
1407    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1408      throw new UnsupportedOperationException();
1409    }
1410
1411    @Override
1412    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1413      throw new UnsupportedOperationException();
1414    }
1415
1416    @Override
1417    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1418      throw new UnsupportedOperationException();
1419    }
1420
1421    // The code below is exactly the same for each entry type.
1422
1423    final int hash;
1424    final ReferenceEntry<K, V> next;
1425    volatile ValueReference<K, V> valueReference = unset();
1426
1427    @Override
1428    public ValueReference<K, V> getValueReference() {
1429      return valueReference;
1430    }
1431
1432    @Override
1433    public void setValueReference(ValueReference<K, V> valueReference) {
1434      this.valueReference = valueReference;
1435    }
1436
1437    @Override
1438    public int getHash() {
1439      return hash;
1440    }
1441
1442    @Override
1443    public ReferenceEntry<K, V> getNext() {
1444      return next;
1445    }
1446  }
1447
1448  static final class WeakAccessEntry<K, V>
1449      extends WeakEntry<K, V> implements ReferenceEntry<K, V> {
1450    WeakAccessEntry(
1451        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1452      super(queue, key, hash, next);
1453    }
1454
1455    // The code below is exactly the same for each access entry type.
1456
1457    volatile long accessTime = Long.MAX_VALUE;
1458
1459    @Override
1460    public long getAccessTime() {
1461      return accessTime;
1462    }
1463
1464    @Override
1465    public void setAccessTime(long time) {
1466      this.accessTime = time;
1467    }
1468
1469    @GuardedBy("Segment.this")
1470    ReferenceEntry<K, V> nextAccess = nullEntry();
1471
1472    @Override
1473    public ReferenceEntry<K, V> getNextInAccessQueue() {
1474      return nextAccess;
1475    }
1476
1477    @Override
1478    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1479      this.nextAccess = next;
1480    }
1481
1482    @GuardedBy("Segment.this")
1483    ReferenceEntry<K, V> previousAccess = nullEntry();
1484
1485    @Override
1486    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1487      return previousAccess;
1488    }
1489
1490    @Override
1491    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1492      this.previousAccess = previous;
1493    }
1494  }
1495
1496  static final class WeakWriteEntry<K, V>
1497      extends WeakEntry<K, V> implements ReferenceEntry<K, V> {
1498    WeakWriteEntry(
1499        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1500      super(queue, key, hash, next);
1501    }
1502
1503    // The code below is exactly the same for each write entry type.
1504
1505    volatile long writeTime = Long.MAX_VALUE;
1506
1507    @Override
1508    public long getWriteTime() {
1509      return writeTime;
1510    }
1511
1512    @Override
1513    public void setWriteTime(long time) {
1514      this.writeTime = time;
1515    }
1516
1517    @GuardedBy("Segment.this")
1518    ReferenceEntry<K, V> nextWrite = nullEntry();
1519
1520    @Override
1521    public ReferenceEntry<K, V> getNextInWriteQueue() {
1522      return nextWrite;
1523    }
1524
1525    @Override
1526    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1527      this.nextWrite = next;
1528    }
1529
1530    @GuardedBy("Segment.this")
1531    ReferenceEntry<K, V> previousWrite = nullEntry();
1532
1533    @Override
1534    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1535      return previousWrite;
1536    }
1537
1538    @Override
1539    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1540      this.previousWrite = previous;
1541    }
1542  }
1543
1544  static final class WeakAccessWriteEntry<K, V>
1545      extends WeakEntry<K, V> implements ReferenceEntry<K, V> {
1546    WeakAccessWriteEntry(
1547        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1548      super(queue, key, hash, next);
1549    }
1550
1551    // The code below is exactly the same for each access entry type.
1552
1553    volatile long accessTime = Long.MAX_VALUE;
1554
1555    @Override
1556    public long getAccessTime() {
1557      return accessTime;
1558    }
1559
1560    @Override
1561    public void setAccessTime(long time) {
1562      this.accessTime = time;
1563    }
1564
1565    @GuardedBy("Segment.this")
1566    ReferenceEntry<K, V> nextAccess = nullEntry();
1567
1568    @Override
1569    public ReferenceEntry<K, V> getNextInAccessQueue() {
1570      return nextAccess;
1571    }
1572
1573    @Override
1574    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1575      this.nextAccess = next;
1576    }
1577
1578    @GuardedBy("Segment.this")
1579    ReferenceEntry<K, V> previousAccess = nullEntry();
1580
1581    @Override
1582    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1583      return previousAccess;
1584    }
1585
1586    @Override
1587    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1588      this.previousAccess = previous;
1589    }
1590
1591    // The code below is exactly the same for each write entry type.
1592
1593    volatile long writeTime = Long.MAX_VALUE;
1594
1595    @Override
1596    public long getWriteTime() {
1597      return writeTime;
1598    }
1599
1600    @Override
1601    public void setWriteTime(long time) {
1602      this.writeTime = time;
1603    }
1604
1605    @GuardedBy("Segment.this")
1606    ReferenceEntry<K, V> nextWrite = nullEntry();
1607
1608    @Override
1609    public ReferenceEntry<K, V> getNextInWriteQueue() {
1610      return nextWrite;
1611    }
1612
1613    @Override
1614    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1615      this.nextWrite = next;
1616    }
1617
1618    @GuardedBy("Segment.this")
1619    ReferenceEntry<K, V> previousWrite = nullEntry();
1620
1621    @Override
1622    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1623      return previousWrite;
1624    }
1625
1626    @Override
1627    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1628      this.previousWrite = previous;
1629    }
1630  }
1631
1632  /**
1633   * References a weak value.
1634   */
1635  static class WeakValueReference<K, V>
1636      extends WeakReference<V> implements ValueReference<K, V> {
1637    final ReferenceEntry<K, V> entry;
1638
1639    WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1640      super(referent, queue);
1641      this.entry = entry;
1642    }
1643
1644    @Override
1645    public int getWeight() {
1646      return 1;
1647    }
1648
1649    @Override
1650    public ReferenceEntry<K, V> getEntry() {
1651      return entry;
1652    }
1653
1654    @Override
1655    public void notifyNewValue(V newValue) {}
1656
1657    @Override
1658    public ValueReference<K, V> copyFor(
1659        ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
1660      return new WeakValueReference<K, V>(queue, get(), entry);
1661    }
1662
1663    @Override
1664    public boolean isLoading() {
1665      return false;
1666    }
1667
1668    @Override
1669    public boolean isActive() {
1670      return true;
1671    }
1672
1673    @Override
1674    public V waitForValue() {
1675      return get();
1676    }
1677  }
1678
1679  /**
1680   * References a soft value.
1681   */
1682  static class SoftValueReference<K, V>
1683      extends SoftReference<V> implements ValueReference<K, V> {
1684    final ReferenceEntry<K, V> entry;
1685
1686    SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1687      super(referent, queue);
1688      this.entry = entry;
1689    }
1690
1691    @Override
1692    public int getWeight() {
1693      return 1;
1694    }
1695
1696    @Override
1697    public ReferenceEntry<K, V> getEntry() {
1698      return entry;
1699    }
1700
1701    @Override
1702    public void notifyNewValue(V newValue) {}
1703
1704    @Override
1705    public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
1706      return new SoftValueReference<K, V>(queue, get(), entry);
1707    }
1708
1709    @Override
1710    public boolean isLoading() {
1711      return false;
1712    }
1713
1714    @Override
1715    public boolean isActive() {
1716      return true;
1717    }
1718
1719    @Override
1720    public V waitForValue() {
1721      return get();
1722    }
1723  }
1724
1725  /**
1726   * References a strong value.
1727   */
1728  static class StrongValueReference<K, V> implements ValueReference<K, V> {
1729    final V referent;
1730
1731    StrongValueReference(V referent) {
1732      this.referent = referent;
1733    }
1734
1735    @Override
1736    public V get() {
1737      return referent;
1738    }
1739
1740    @Override
1741    public int getWeight() {
1742      return 1;
1743    }
1744
1745    @Override
1746    public ReferenceEntry<K, V> getEntry() {
1747      return null;
1748    }
1749
1750    @Override
1751    public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
1752      return this;
1753    }
1754
1755    @Override
1756    public boolean isLoading() {
1757      return false;
1758    }
1759
1760    @Override
1761    public boolean isActive() {
1762      return true;
1763    }
1764
1765    @Override
1766    public V waitForValue() {
1767      return get();
1768    }
1769
1770    @Override
1771    public void notifyNewValue(V newValue) {}
1772  }
1773
1774  /**
1775   * References a weak value.
1776   */
1777  static final class WeightedWeakValueReference<K, V> extends WeakValueReference<K, V> {
1778    final int weight;
1779
1780    WeightedWeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1781        int weight) {
1782      super(queue, referent, entry);
1783      this.weight = weight;
1784    }
1785
1786    @Override
1787    public int getWeight() {
1788      return weight;
1789    }
1790
1791    @Override
1792    public ValueReference<K, V> copyFor(
1793        ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
1794      return new WeightedWeakValueReference<K, V>(queue, get(), entry, weight);
1795    }
1796  }
1797
1798  /**
1799   * References a soft value.
1800   */
1801  static final class WeightedSoftValueReference<K, V> extends SoftValueReference<K, V> {
1802    final int weight;
1803
1804    WeightedSoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1805        int weight) {
1806      super(queue, referent, entry);
1807      this.weight = weight;
1808    }
1809
1810    @Override
1811    public int getWeight() {
1812      return weight;
1813    }

    @Override
    public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
      return new WeightedSoftValueReference<K, V>(queue, get(), entry, weight);
    }
  }
1820
1821  /**
1822   * References a strong value.
1823   */
1824  static final class WeightedStrongValueReference<K, V> extends StrongValueReference<K, V> {
1825    final int weight;
1826
1827    WeightedStrongValueReference(V referent, int weight) {
1828      super(referent);
1829      this.weight = weight;
1830    }
1831
1832    @Override
1833    public int getWeight() {
1834      return weight;
1835    }
1836  }
1837
1838  /**
1839   * Applies a supplemental hash function to a given hash code, which defends against poor quality
   * hash functions. This is critical when the concurrent hash map uses power-of-two length hash
   * tables, which otherwise encounter collisions for hash codes that do not differ in the lower
   * or upper bits.
1843   *
1844   * @param h hash code
1845   */
1846  static int rehash(int h) {
1847    // Spread bits to regularize both segment and index locations,
1848    // using variant of single-word Wang/Jenkins hash.
1849    // TODO(kevinb): use Hashing/move this to Hashing?
1850    h += (h << 15) ^ 0xffffcd7d;
1851    h ^= (h >>> 10);
1852    h += (h << 3);
1853    h ^= (h >>> 6);
1854    h += (h << 2) + (h << 14);
1855    return h ^ (h >>> 16);
1856  }
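
  /*
   * A minimal sketch of why the spreading above matters, assuming a hypothetical 16-bucket table:
   * buckets are chosen by masking with (length - 1), so hash codes that differ only above the mask
   * would otherwise collide.
   *
   *   int mask = 16 - 1;
   *   int a = 0x00010000 & mask;          // 0
   *   int b = 0x00020000 & mask;          // 0 -- same bucket despite distinct hash codes
   *   int c = rehash(0x00010000) & mask;  // after spreading, the low bits are likely to differ
   */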
1857
1858  /**
1859   * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly.
1860   */
1861  @GuardedBy("Segment.this")
1862  @VisibleForTesting
1863  ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1864    return segmentFor(hash).newEntry(key, hash, next);
1865  }
1866
1867  /**
1868   * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly.
1869   */
1870  @GuardedBy("Segment.this")
1871  @VisibleForTesting
1872  ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
1873    int hash = original.getHash();
1874    return segmentFor(hash).copyEntry(original, newNext);
1875  }
1876
1877  /**
1878   * This method is a convenience for testing. Code should call {@link Segment#setValue} instead.
1879   */
1880  @GuardedBy("Segment.this")
1881  @VisibleForTesting
1882  ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value, int weight) {
1883    int hash = entry.getHash();
1884    return valueStrength.referenceValue(segmentFor(hash), entry, value, weight);
1885  }
1886
1887  int hash(Object key) {
1888    int h = keyEquivalence.hash(key);
1889    return rehash(h);
1890  }
1891
1892  void reclaimValue(ValueReference<K, V> valueReference) {
1893    ReferenceEntry<K, V> entry = valueReference.getEntry();
1894    int hash = entry.getHash();
1895    segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference);
1896  }
1897
1898  void reclaimKey(ReferenceEntry<K, V> entry) {
1899    int hash = entry.getHash();
1900    segmentFor(hash).reclaimKey(entry, hash);
1901  }
1902
1903  /**
1904   * This method is a convenience for testing. Code should call {@link Segment#getLiveValue}
1905   * instead.
1906   */
1907  @VisibleForTesting
1908  boolean isLive(ReferenceEntry<K, V> entry, long now) {
1909    return segmentFor(entry.getHash()).getLiveValue(entry, now) != null;
1910  }
1911
1912  /**
1913   * Returns the segment that should be used for a key with the given hash.
1914   *
1915   * @param hash the hash code for the key
1916   * @return the segment
1917   */
1918  Segment<K, V> segmentFor(int hash) {
1919    // TODO(fry): Lazily create segments?
1920    return segments[(hash >>> segmentShift) & segmentMask];
1921  }
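
  /*
   * A minimal sketch of the index arithmetic above, assuming a hypothetical configuration with
   * four segments (in which case segmentShift would be 30 and segmentMask 3): the high bits of the
   * spread hash select the segment, leaving the low bits to select the bucket within that segment.
   *
   *   int segmentIndex = (hash >>> 30) & 3;  // always in [0, 3]
   */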
1922
1923  Segment<K, V> createSegment(
1924      int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) {
1925    return new Segment<K, V>(this, initialCapacity, maxSegmentWeight, statsCounter);
1926  }
1927
1928  /**
1929   * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
1930   * loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to
   * clean up stale entries. As such it should only be called outside of a segment context, such as
1932   * during iteration.
1933   */
1934  @Nullable
1935  V getLiveValue(ReferenceEntry<K, V> entry, long now) {
1936    if (entry.getKey() == null) {
1937      return null;
1938    }
1939    V value = entry.getValueReference().get();
1940    if (value == null) {
1941      return null;
1942    }
1943
1944    if (isExpired(entry, now)) {
1945      return null;
1946    }
1947    return value;
1948  }
1949
1950  // expiration
1951
1952  /**
1953   * Returns true if the entry has expired.
1954   */
1955  boolean isExpired(ReferenceEntry<K, V> entry, long now) {
1956    if (expiresAfterAccess()
1957        && (now - entry.getAccessTime() > expireAfterAccessNanos)) {
1958      return true;
1959    }
1960    if (expiresAfterWrite()
1961        && (now - entry.getWriteTime() > expireAfterWriteNanos)) {
1962      return true;
1963    }
1964    return false;
1965  }
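
  /*
   * For context, a minimal sketch of how these expiration windows are typically configured through
   * the public builder API ("loader" is a placeholder CacheLoader):
   *
   *   LoadingCache<String, String> cache = CacheBuilder.newBuilder()
   *       .expireAfterAccess(10, TimeUnit.MINUTES)  // populates expireAfterAccessNanos
   *       .expireAfterWrite(1, TimeUnit.HOURS)      // populates expireAfterWriteNanos
   *       .build(loader);
   *
   * Note that the checks above compare elapsed time by subtraction, so they remain correct even if
   * the ticker's nanosecond counter overflows.
   */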
1966
1967  // queues
1968
1969  @GuardedBy("Segment.this")
1970  static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1971    previous.setNextInAccessQueue(next);
1972    next.setPreviousInAccessQueue(previous);
1973  }
1974
1975  @GuardedBy("Segment.this")
1976  static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) {
1977    ReferenceEntry<K, V> nullEntry = nullEntry();
1978    nulled.setNextInAccessQueue(nullEntry);
1979    nulled.setPreviousInAccessQueue(nullEntry);
1980  }
1981
1982  @GuardedBy("Segment.this")
1983  static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1984    previous.setNextInWriteQueue(next);
1985    next.setPreviousInWriteQueue(previous);
1986  }
1987
1988  @GuardedBy("Segment.this")
1989  static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) {
1990    ReferenceEntry<K, V> nullEntry = nullEntry();
1991    nulled.setNextInWriteQueue(nullEntry);
1992    nulled.setPreviousInWriteQueue(nullEntry);
1993  }
1994
1995  /**
1996   * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
1997   * or eligibility for garbage collection. This should be called every time expireEntries or
1998   * evictEntry is called (once the lock is released).
1999   */
2000  void processPendingNotifications() {
2001    RemovalNotification<K, V> notification;
2002    while ((notification = removalNotificationQueue.poll()) != null) {
2003      try {
2004        removalListener.onRemoval(notification);
2005      } catch (Throwable e) {
2006        logger.log(Level.WARNING, "Exception thrown by removal listener", e);
2007      }
2008    }
2009  }
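
  /*
   * The notifications drained above come from a removal listener supplied through the public
   * builder API; a minimal sketch ("loader" is a placeholder):
   *
   *   CacheBuilder.newBuilder()
   *       .removalListener(new RemovalListener<String, String>() {
   *         @Override
   *         public void onRemoval(RemovalNotification<String, String> notification) {
   *           logger.info(notification.getKey() + " removed: " + notification.getCause());
   *         }
   *       })
   *       .build(loader);
   */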
2010
2011  @SuppressWarnings("unchecked")
2012  final Segment<K, V>[] newSegmentArray(int ssize) {
2013    return new Segment[ssize];
2014  }
2015
2016  // Inner Classes
2017
2018  /**
2019   * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
2020   * opportunistically, just to simplify some locking and avoid separate construction.
2021   */
2022  @SuppressWarnings("serial") // This class is never serialized.
2023  static class Segment<K, V> extends ReentrantLock {
2024
2025    /*
2026     * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class.
2027     * It will require more memory but will reduce indirection.
2028     */
2029
2030    /*
2031     * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can
2032     * be read without locking. Next fields of nodes are immutable (final). All list additions are
2033     * performed at the front of each bin. This makes it easy to check changes, and also fast to
2034     * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
2035     * works well for hash tables since the bin lists tend to be short. (The average length is less
2036     * than two.)
2037     *
2038     * Read operations can thus proceed without locking, but rely on selected uses of volatiles to
2039     * ensure that completed write operations performed by other threads are noticed. For most
2040     * purposes, the "count" field, tracking the number of elements, serves as that volatile
2041     * variable ensuring visibility. This is convenient because this field needs to be read in many
2042     * read operations anyway:
2043     *
2044     * - All (unsynchronized) read operations must first read the "count" field, and should not
2045     * look at table entries if it is 0.
2046     *
2047     * - All (synchronized) write operations should write to the "count" field after structurally
2048     * changing any bin. The operations must not take any action that could even momentarily
2049     * cause a concurrent read operation to see inconsistent data. This is made easier by the
2050     * nature of the read operations in Map. For example, no operation can reveal that the table
2051     * has grown but the threshold has not yet been updated, so there are no atomicity requirements
2052     * for this with respect to reads.
2053     *
2054     * As a guide, all critical volatile reads and writes to the count field are marked in code
2055     * comments.
2056     */
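
    /*
     * A condensed sketch of the resulting unlocked read pattern, as used by get and containsKey
     * below:
     *
     *   if (count != 0) {  // volatile read; happens-before the table reads that follow
     *     ReferenceEntry<K, V> e = table.get(hash & (table.length() - 1));
     *     // ... walk e.getNext() looking for a live, unexpired, equivalent key ...
     *   }
     */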
2057
2058    final LocalCache<K, V> map;
2059
2060    /**
2061     * The number of live elements in this segment's region.
2062     */
2063    volatile int count;
2064
2065    /**
2066     * The weight of the live elements in this segment's region.
2067     */
2068    @GuardedBy("Segment.this")
2069    int totalWeight;
2070
2071    /**
2072     * Number of updates that alter the size of the table. This is used during bulk-read methods to
2073     * make sure they see a consistent snapshot: If modCounts change during a traversal of segments
     * computing size or checking containsValue, then we might have an inconsistent view of state
2075     * so (usually) must retry.
2076     */
2077    int modCount;
2078
2079    /**
2080     * The table is expanded when its size exceeds this threshold. (The value of this field is
2081     * always {@code (int)(capacity * 0.75)}.)
2082     */
2083    int threshold;
2084
2085    /**
2086     * The per-segment table.
2087     */
2088    volatile AtomicReferenceArray<ReferenceEntry<K, V>> table;
2089
2090    /**
2091     * The maximum weight of this segment. UNSET_INT if there is no maximum.
2092     */
2093    final long maxSegmentWeight;
2094
2095    /**
2096     * The key reference queue contains entries whose keys have been garbage collected, and which
2097     * need to be cleaned up internally.
2098     */
2099    final ReferenceQueue<K> keyReferenceQueue;
2100
2101    /**
2102     * The value reference queue contains value references whose values have been garbage collected,
2103     * and which need to be cleaned up internally.
2104     */
2105    final ReferenceQueue<V> valueReferenceQueue;
2106
2107    /**
2108     * The recency queue is used to record which entries were accessed for updating the access
2109     * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is
2110     * crossed or a write occurs on the segment.
2111     */
2112    final Queue<ReferenceEntry<K, V>> recencyQueue;
2113
2114    /**
2115     * A counter of the number of reads since the last write, used to drain queues on a small
2116     * fraction of read operations.
2117     */
2118    final AtomicInteger readCount = new AtomicInteger();
2119
2120    /**
2121     * A queue of elements currently in the map, ordered by write time. Elements are added to the
2122     * tail of the queue on write.
2123     */
2124    @GuardedBy("Segment.this")
2125    final Queue<ReferenceEntry<K, V>> writeQueue;
2126
2127    /**
2128     * A queue of elements currently in the map, ordered by access time. Elements are added to the
2129     * tail of the queue on access (note that writes count as accesses).
2130     */
2131    @GuardedBy("Segment.this")
2132    final Queue<ReferenceEntry<K, V>> accessQueue;
2133
2134    /** Accumulates cache statistics. */
2135    final StatsCounter statsCounter;
2136
2137    Segment(LocalCache<K, V> map, int initialCapacity, long maxSegmentWeight,
2138        StatsCounter statsCounter) {
2139      this.map = map;
2140      this.maxSegmentWeight = maxSegmentWeight;
2141      this.statsCounter = statsCounter;
2142      initTable(newEntryArray(initialCapacity));
2143
2144      keyReferenceQueue = map.usesKeyReferences()
2145           ? new ReferenceQueue<K>() : null;
2146
2147      valueReferenceQueue = map.usesValueReferences()
2148           ? new ReferenceQueue<V>() : null;
2149
2150      recencyQueue = map.usesAccessQueue()
2151          ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>()
2152          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2153
2154      writeQueue = map.usesWriteQueue()
2155          ? new WriteQueue<K, V>()
2156          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2157
2158      accessQueue = map.usesAccessQueue()
2159          ? new AccessQueue<K, V>()
2160          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2161    }
2162
2163    AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) {
2164      return new AtomicReferenceArray<ReferenceEntry<K, V>>(size);
2165    }
2166
2167    void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) {
2168      this.threshold = newTable.length() * 3 / 4; // 0.75
2169      if (!map.customWeigher() && this.threshold == maxSegmentWeight) {
2170        // prevent spurious expansion before eviction
2171        this.threshold++;
2172      }
2173      this.table = newTable;
2174    }
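
    /*
     * For example: a table of length 16 gets a threshold of 12, so the segment expands once adding
     * an entry would push its count past 12, keeping the load factor near 0.75.
     */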
2175
2176    @GuardedBy("Segment.this")
2177    ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
2178      return map.entryFactory.newEntry(this, key, hash, next);
2179    }
2180
2181    @GuardedBy("Segment.this")
2182    ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
2183      ValueReference<K, V> valueReference = original.getValueReference();
2184      ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext);
2185      newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, newEntry));
2186      return newEntry;
2187    }
2188
2189    /**
2190     * Sets a new value of an entry. Adds newly created entries at the end of the access queue.
2191     */
2192    @GuardedBy("Segment.this")
2193    void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) {
2194      ValueReference<K, V> previous = entry.getValueReference();
2195      int weight = map.weigher.weigh(key, value);
2196      checkState(weight >= 0, "Weights must be non-negative");
2197
2198      ValueReference<K, V> valueReference =
2199          map.valueStrength.referenceValue(this, entry, value, weight);
2200      entry.setValueReference(valueReference);
2201      recordWrite(entry, weight, now);
2202      previous.notifyNewValue(value);
2203    }
2204
2205    // loading
2206
2207    V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException {
2208      try {
2209        if (count != 0) { // read-volatile
2210          // don't call getLiveEntry, which would ignore loading values
2211          ReferenceEntry<K, V> e = getEntry(key, hash);
2212          if (e != null) {
2213            long now = map.ticker.read();
2214            V value = getLiveValue(e, now);
2215            if (value != null) {
2216              recordRead(e, now);
2217              statsCounter.recordHits(1);
2218              return scheduleRefresh(e, key, hash, value, now, loader);
2219            }
2220            ValueReference<K, V> valueReference = e.getValueReference();
2221            if (valueReference.isLoading()) {
2222              return waitForLoadingValue(e, key, valueReference);
2223            }
2224          }
2225        }
2226
2227        // at this point e is either null or expired;
2228        return lockedGetOrLoad(key, hash, loader);
2229      } catch (ExecutionException ee) {
2230        Throwable cause = ee.getCause();
2231        if (cause instanceof Error) {
2232          throw new ExecutionError((Error) cause);
2233        } else if (cause instanceof RuntimeException) {
2234          throw new UncheckedExecutionException(cause);
2235        }
2236        throw ee;
2237      } finally {
2238        postReadCleanup();
2239      }
2240    }
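
    /*
     * This segment-level read path ultimately backs LoadingCache.get; a minimal usage sketch:
     *
     *   LoadingCache<String, Integer> cache = CacheBuilder.newBuilder()
     *       .build(new CacheLoader<String, Integer>() {
     *         @Override
     *         public Integer load(String key) {
     *           return key.length();
     *         }
     *       });
     *   int length = cache.getUnchecked("hello");  // loads once, then served from the cache
     */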
2241
2242    V lockedGetOrLoad(K key, int hash, CacheLoader<? super K, V> loader)
2243        throws ExecutionException {
2244      ReferenceEntry<K, V> e;
2245      ValueReference<K, V> valueReference = null;
2246      LoadingValueReference<K, V> loadingValueReference = null;
2247      boolean createNewEntry = true;
2248
2249      lock();
2250      try {
2251        // re-read ticker once inside the lock
2252        long now = map.ticker.read();
2253        preWriteCleanup(now);
2254
2255        int newCount = this.count - 1;
2256        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2257        int index = hash & (table.length() - 1);
2258        ReferenceEntry<K, V> first = table.get(index);
2259
2260        for (e = first; e != null; e = e.getNext()) {
2261          K entryKey = e.getKey();
2262          if (e.getHash() == hash && entryKey != null
2263              && map.keyEquivalence.equivalent(key, entryKey)) {
2264            valueReference = e.getValueReference();
2265            if (valueReference.isLoading()) {
2266              createNewEntry = false;
2267            } else {
2268              V value = valueReference.get();
2269              if (value == null) {
2270                enqueueNotification(entryKey, hash, valueReference, RemovalCause.COLLECTED);
2271              } else if (map.isExpired(e, now)) {
2272                // This is a duplicate check, as preWriteCleanup already purged expired
                // entries, but let's accommodate an incorrect expiration queue.
2274                enqueueNotification(entryKey, hash, valueReference, RemovalCause.EXPIRED);
2275              } else {
2276                recordLockedRead(e, now);
2277                statsCounter.recordHits(1);
2278                // we were concurrent with loading; don't consider refresh
2279                return value;
2280              }
2281
2282              // immediately reuse invalid entries
2283              writeQueue.remove(e);
2284              accessQueue.remove(e);
2285              this.count = newCount; // write-volatile
2286            }
2287            break;
2288          }
2289        }
2290
2291        if (createNewEntry) {
2292          loadingValueReference = new LoadingValueReference<K, V>();
2293
2294          if (e == null) {
2295            e = newEntry(key, hash, first);
2296            e.setValueReference(loadingValueReference);
2297            table.set(index, e);
2298          } else {
2299            e.setValueReference(loadingValueReference);
2300          }
2301        }
2302      } finally {
2303        unlock();
2304        postWriteCleanup();
2305      }
2306
2307      if (createNewEntry) {
2308        try {
2309          // Synchronizes on the entry to allow failing fast when a recursive load is
2310          // detected. This may be circumvented when an entry is copied, but will fail fast most
2311          // of the time.
2312          synchronized (e) {
2313            return loadSync(key, hash, loadingValueReference, loader);
2314          }
2315        } finally {
2316          statsCounter.recordMisses(1);
2317        }
2318      } else {
2319        // The entry already exists. Wait for loading.
2320        return waitForLoadingValue(e, key, valueReference);
2321      }
2322    }
2323
2324    V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference)
2325        throws ExecutionException {
2326      if (!valueReference.isLoading()) {
2327        throw new AssertionError();
2328      }
2329
2330      checkState(!Thread.holdsLock(e), "Recursive load");
2331      // don't consider expiration as we're concurrent with loading
2332      try {
2333        V value = valueReference.waitForValue();
2334        if (value == null) {
2335          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2336        }
2337        // re-read ticker now that loading has completed
2338        long now = map.ticker.read();
2339        recordRead(e, now);
2340        return value;
2341      } finally {
2342        statsCounter.recordMisses(1);
2343      }
2344    }
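
    /*
     * The checkState above pairs with the synchronized (e) block in lockedGetOrLoad: a loader that
     * re-enters the cache for the same key on the same thread still holds the entry's monitor, so
     * the recursion is reported as "Recursive load" instead of blocking indefinitely in
     * waitForValue().
     */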
2345
2346    // at most one of loadSync/loadAsync may be called for any given LoadingValueReference
2347
2348    V loadSync(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2349        CacheLoader<? super K, V> loader) throws ExecutionException {
2350      ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2351      return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2352    }
2353
2354    ListenableFuture<V> loadAsync(final K key, final int hash,
2355        final LoadingValueReference<K, V> loadingValueReference, CacheLoader<? super K, V> loader) {
2356      final ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2357      loadingFuture.addListener(
2358          new Runnable() {
2359            @Override
2360            public void run() {
2361              try {
2362                V newValue = getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2363                // update loadingFuture for the sake of other pending requests
2364                loadingValueReference.set(newValue);
2365              } catch (Throwable t) {
2366                logger.log(Level.WARNING, "Exception thrown during refresh", t);
2367                loadingValueReference.setException(t);
2368              }
2369            }
2370          }, sameThreadExecutor);
2371      return loadingFuture;
2372    }
2373
2374    /**
2375     * Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats.
2376     */
2377    V getAndRecordStats(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2378        ListenableFuture<V> newValue) throws ExecutionException {
2379      V value = null;
2380      try {
2381        value = getUninterruptibly(newValue);
2382        if (value == null) {
2383          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2384        }
2385        statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos());
2386        storeLoadedValue(key, hash, loadingValueReference, value);
2387        return value;
2388      } finally {
2389        if (value == null) {
2390          statsCounter.recordLoadException(loadingValueReference.elapsedNanos());
2391          removeLoadingValue(key, hash, loadingValueReference);
2392        }
2393      }
2394    }
2395
2396    V scheduleRefresh(ReferenceEntry<K, V> entry, K key, int hash, V oldValue, long now,
2397        CacheLoader<? super K, V> loader) {
2398      if (map.refreshes() && (now - entry.getWriteTime() > map.refreshNanos)) {
2399        V newValue = refresh(key, hash, loader);
2400        if (newValue != null) {
2401          return newValue;
2402        }
2403      }
2404      return oldValue;
2405    }
2406
2407    /**
2408     * Refreshes the value associated with {@code key}, unless another thread is already doing so.
2409     * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or
2410     * {@code null} if another thread is performing the refresh or if an error occurs during
2411     * refresh.
2412     */
2413    @Nullable
2414    V refresh(K key, int hash, CacheLoader<? super K, V> loader) {
2415      final LoadingValueReference<K, V> loadingValueReference =
2416          insertLoadingValueReference(key, hash);
2417      if (loadingValueReference == null) {
2418        return null;
2419      }
2420
2421      ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader);
2422      if (result.isDone()) {
2423        try {
2424          return result.get();
2425        } catch (Throwable t) {
2426          // don't let refresh exceptions propagate; error was already logged
2427        }
2428      }
2429      return null;
2430    }
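
    /*
     * Refreshing is driven by the public builder setting; a minimal sketch ("loader" is a
     * placeholder):
     *
     *   LoadingCache<String, String> cache = CacheBuilder.newBuilder()
     *       .refreshAfterWrite(5, TimeUnit.MINUTES)  // populates map.refreshNanos
     *       .build(loader);
     *
     * Once an entry's write time is older than that window, other readers keep getting the old
     * value while at most one thread performs the reload, per scheduleRefresh and refresh above.
     */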
2431
2432    /**
2433     * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference
2434     * is already loading.
2435     */
2436    @Nullable
2437    LoadingValueReference<K, V> insertLoadingValueReference(final K key, final int hash) {
2438      ReferenceEntry<K, V> e = null;
2439      lock();
2440      try {
2441        long now = map.ticker.read();
2442        preWriteCleanup(now);
2443
2444        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2445        int index = hash & (table.length() - 1);
2446        ReferenceEntry<K, V> first = table.get(index);
2447
2448        // Look for an existing entry.
2449        for (e = first; e != null; e = e.getNext()) {
2450          K entryKey = e.getKey();
2451          if (e.getHash() == hash && entryKey != null
2452              && map.keyEquivalence.equivalent(key, entryKey)) {
2453            // We found an existing entry.
2454
2455            ValueReference<K, V> valueReference = e.getValueReference();
2456            if (valueReference.isLoading()) {
2457              // refresh is a no-op if loading is pending
2458              return null;
2459            }
2460
2461            // continue returning old value while loading
2462            ++modCount;
2463            LoadingValueReference<K, V> loadingValueReference =
2464                new LoadingValueReference<K, V>(valueReference);
2465            e.setValueReference(loadingValueReference);
2466            return loadingValueReference;
2467          }
2468        }
2469
2470        ++modCount;
2471        LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<K, V>();
2472        e = newEntry(key, hash, first);
2473        e.setValueReference(loadingValueReference);
2474        table.set(index, e);
2475        return loadingValueReference;
2476      } finally {
2477        unlock();
2478        postWriteCleanup();
2479      }
2480    }
2481
2482    // reference queues, for garbage collection cleanup
2483
2484    /**
     * Cleans up collected entries when the lock is available.
2486     */
2487    void tryDrainReferenceQueues() {
2488      if (tryLock()) {
2489        try {
2490          drainReferenceQueues();
2491        } finally {
2492          unlock();
2493        }
2494      }
2495    }
2496
2497    /**
2498     * Drain the key and value reference queues, cleaning up internal entries containing garbage
2499     * collected keys or values.
2500     */
2501    @GuardedBy("Segment.this")
2502    void drainReferenceQueues() {
2503      if (map.usesKeyReferences()) {
2504        drainKeyReferenceQueue();
2505      }
2506      if (map.usesValueReferences()) {
2507        drainValueReferenceQueue();
2508      }
2509    }
2510
2511    @GuardedBy("Segment.this")
2512    void drainKeyReferenceQueue() {
2513      Reference<? extends K> ref;
2514      int i = 0;
2515      while ((ref = keyReferenceQueue.poll()) != null) {
2516        @SuppressWarnings("unchecked")
2517        ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref;
2518        map.reclaimKey(entry);
2519        if (++i == DRAIN_MAX) {
2520          break;
2521        }
2522      }
2523    }
2524
2525    @GuardedBy("Segment.this")
2526    void drainValueReferenceQueue() {
2527      Reference<? extends V> ref;
2528      int i = 0;
2529      while ((ref = valueReferenceQueue.poll()) != null) {
2530        @SuppressWarnings("unchecked")
2531        ValueReference<K, V> valueReference = (ValueReference<K, V>) ref;
2532        map.reclaimValue(valueReference);
2533        if (++i == DRAIN_MAX) {
2534          break;
2535        }
2536      }
2537    }
2538
2539    /**
2540     * Clears all entries from the key and value reference queues.
2541     */
2542    void clearReferenceQueues() {
2543      if (map.usesKeyReferences()) {
2544        clearKeyReferenceQueue();
2545      }
2546      if (map.usesValueReferences()) {
2547        clearValueReferenceQueue();
2548      }
2549    }
2550
2551    void clearKeyReferenceQueue() {
2552      while (keyReferenceQueue.poll() != null) {}
2553    }
2554
2555    void clearValueReferenceQueue() {
2556      while (valueReferenceQueue.poll() != null) {}
2557    }
2558
2559    // recency queue, shared by expiration and eviction
2560
2561    /**
2562     * Records the relative order in which this read was performed by adding {@code entry} to the
2563     * recency queue. At write-time, or when the queue is full past the threshold, the queue will
2564     * be drained and the entries therein processed.
2565     *
2566     * <p>Note: locked reads should use {@link #recordLockedRead}.
2567     */
2568    void recordRead(ReferenceEntry<K, V> entry, long now) {
2569      if (map.recordsAccess()) {
2570        entry.setAccessTime(now);
2571      }
2572      recencyQueue.add(entry);
2573    }
2574
2575    /**
2576     * Updates the eviction metadata that {@code entry} was just read. This currently amounts to
2577     * adding {@code entry} to relevant eviction lists.
2578     *
2579     * <p>Note: this method should only be called under lock, as it directly manipulates the
2580     * eviction queues. Unlocked reads should use {@link #recordRead}.
2581     */
2582    @GuardedBy("Segment.this")
2583    void recordLockedRead(ReferenceEntry<K, V> entry, long now) {
2584      if (map.recordsAccess()) {
2585        entry.setAccessTime(now);
2586      }
2587      accessQueue.add(entry);
2588    }
2589
2590    /**
2591     * Updates eviction metadata that {@code entry} was just written. This currently amounts to
2592     * adding {@code entry} to relevant eviction lists.
2593     */
2594    @GuardedBy("Segment.this")
2595    void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) {
2596      // we are already under lock, so drain the recency queue immediately
2597      drainRecencyQueue();
2598      totalWeight += weight;
2599
2600      if (map.recordsAccess()) {
2601        entry.setAccessTime(now);
2602      }
2603      if (map.recordsWrite()) {
2604        entry.setWriteTime(now);
2605      }
2606      accessQueue.add(entry);
2607      writeQueue.add(entry);
2608    }
2609
2610    /**
2611     * Drains the recency queue, updating eviction metadata that the entries therein were read in
2612     * the specified relative order. This currently amounts to adding them to relevant eviction
2613     * lists (accounting for the fact that they could have been removed from the map since being
2614     * added to the recency queue).
2615     */
2616    @GuardedBy("Segment.this")
2617    void drainRecencyQueue() {
2618      ReferenceEntry<K, V> e;
2619      while ((e = recencyQueue.poll()) != null) {
        // An entry may be in the recency queue despite it being removed from
        // the map. This can occur when the entry was concurrently read while a
2622        // writer is removing it from the segment or after a clear has removed
2623        // all of the segment's entries.
2624        if (accessQueue.contains(e)) {
2625          accessQueue.add(e);
2626        }
2627      }
2628    }
2629
2630    // expiration
2631
2632    /**
     * Cleans up expired entries when the lock is available.
2634     */
2635    void tryExpireEntries(long now) {
2636      if (tryLock()) {
2637        try {
2638          expireEntries(now);
2639        } finally {
2640          unlock();
2641          // don't call postWriteCleanup as we're in a read
2642        }
2643      }
2644    }
2645
2646    @GuardedBy("Segment.this")
2647    void expireEntries(long now) {
2648      drainRecencyQueue();
2649
2650      ReferenceEntry<K, V> e;
2651      while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) {
2652        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2653          throw new AssertionError();
2654        }
2655      }
2656      while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) {
2657        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2658          throw new AssertionError();
2659        }
2660      }
2661    }
2662
2663    // eviction
2664
2665    @GuardedBy("Segment.this")
2666    void enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause) {
2667      enqueueNotification(entry.getKey(), entry.getHash(), entry.getValueReference(), cause);
2668    }
2669
2670    @GuardedBy("Segment.this")
2671    void enqueueNotification(@Nullable K key, int hash, ValueReference<K, V> valueReference,
2672        RemovalCause cause) {
2673      totalWeight -= valueReference.getWeight();
2674      if (cause.wasEvicted()) {
2675        statsCounter.recordEviction();
2676      }
2677      if (map.removalNotificationQueue != DISCARDING_QUEUE) {
2678        V value = valueReference.get();
2679        RemovalNotification<K, V> notification = new RemovalNotification<K, V>(key, value, cause);
2680        map.removalNotificationQueue.offer(notification);
2681      }
2682    }
2683
2684    /**
2685     * Performs eviction if the segment is full. This should only be called prior to adding a new
2686     * entry and increasing {@code count}.
2687     */
2688    @GuardedBy("Segment.this")
2689    void evictEntries() {
2690      if (!map.evictsBySize()) {
2691        return;
2692      }
2693
2694      drainRecencyQueue();
2695      while (totalWeight > maxSegmentWeight) {
2696        ReferenceEntry<K, V> e = getNextEvictable();
2697        if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) {
2698          throw new AssertionError();
2699        }
2700      }
2701    }
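
    /*
     * Weight-based eviction is configured through the public builder API; a minimal sketch
     * ("loader" is a placeholder):
     *
     *   CacheBuilder.newBuilder()
     *       .maximumWeight(1000000)
     *       .weigher(new Weigher<String, byte[]>() {
     *         @Override
     *         public int weigh(String key, byte[] value) {
     *           return value.length;
     *         }
     *       })
     *       .build(loader);
     *
     * With maximumSize instead of maximumWeight, every entry weighs 1 and maxSegmentWeight
     * effectively caps the number of entries per segment.
     */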
2702
2703    // TODO(fry): instead implement this with an eviction head
2704    ReferenceEntry<K, V> getNextEvictable() {
2705      for (ReferenceEntry<K, V> e : accessQueue) {
2706        int weight = e.getValueReference().getWeight();
2707        if (weight > 0) {
2708          return e;
2709        }
2710      }
2711      throw new AssertionError();
2712    }
2713
2714    /**
2715     * Returns first entry of bin for given hash.
2716     */
2717    ReferenceEntry<K, V> getFirst(int hash) {
2718      // read this volatile field only once
2719      AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2720      return table.get(hash & (table.length() - 1));
2721    }
2722
2723    // Specialized implementations of map methods
2724
2725    @Nullable
2726    ReferenceEntry<K, V> getEntry(Object key, int hash) {
2727      for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) {
2728        if (e.getHash() != hash) {
2729          continue;
2730        }
2731
2732        K entryKey = e.getKey();
2733        if (entryKey == null) {
2734          tryDrainReferenceQueues();
2735          continue;
2736        }
2737
2738        if (map.keyEquivalence.equivalent(key, entryKey)) {
2739          return e;
2740        }
2741      }
2742
2743      return null;
2744    }
2745
2746    @Nullable
2747    ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) {
2748      ReferenceEntry<K, V> e = getEntry(key, hash);
2749      if (e == null) {
2750        return null;
2751      } else if (map.isExpired(e, now)) {
2752        tryExpireEntries(now);
2753        return null;
2754      }
2755      return e;
2756    }
2757
2758    /**
2759     * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
2760     * loading, or expired.
2761     */
2762    V getLiveValue(ReferenceEntry<K, V> entry, long now) {
2763      if (entry.getKey() == null) {
2764        tryDrainReferenceQueues();
2765        return null;
2766      }
2767      V value = entry.getValueReference().get();
2768      if (value == null) {
2769        tryDrainReferenceQueues();
2770        return null;
2771      }
2772
2773      if (map.isExpired(entry, now)) {
2774        tryExpireEntries(now);
2775        return null;
2776      }
2777      return value;
2778    }
2779
2780    @Nullable
2781    V get(Object key, int hash) {
2782      try {
2783        if (count != 0) { // read-volatile
2784          long now = map.ticker.read();
2785          ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2786          if (e == null) {
2787            return null;
2788          }
2789
2790          V value = e.getValueReference().get();
2791          if (value != null) {
2792            recordRead(e, now);
2793            return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader);
2794          }
2795          tryDrainReferenceQueues();
2796        }
2797        return null;
2798      } finally {
2799        postReadCleanup();
2800      }
2801    }
2802
2803    boolean containsKey(Object key, int hash) {
2804      try {
2805        if (count != 0) { // read-volatile
2806          long now = map.ticker.read();
2807          ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2808          if (e == null) {
2809            return false;
2810          }
2811          return e.getValueReference().get() != null;
2812        }
2813
2814        return false;
2815      } finally {
2816        postReadCleanup();
2817      }
2818    }
2819
2820    /**
2821     * This method is a convenience for testing. Code should call {@link
2822     * LocalCache#containsValue} directly.
2823     */
2824    @VisibleForTesting
2825    boolean containsValue(Object value) {
2826      try {
2827        if (count != 0) { // read-volatile
2828          long now = map.ticker.read();
2829          AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2830          int length = table.length();
2831          for (int i = 0; i < length; ++i) {
2832            for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
2833              V entryValue = getLiveValue(e, now);
2834              if (entryValue == null) {
2835                continue;
2836              }
2837              if (map.valueEquivalence.equivalent(value, entryValue)) {
2838                return true;
2839              }
2840            }
2841          }
2842        }
2843
2844        return false;
2845      } finally {
2846        postReadCleanup();
2847      }
2848    }
2849
2850    @Nullable
2851    V put(K key, int hash, V value, boolean onlyIfAbsent) {
2852      lock();
2853      try {
2854        long now = map.ticker.read();
2855        preWriteCleanup(now);
2856
2857        int newCount = this.count + 1;
2858        if (newCount > this.threshold) { // ensure capacity
2859          expand();
2860          newCount = this.count + 1;
2861        }
2862
2863        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2864        int index = hash & (table.length() - 1);
2865        ReferenceEntry<K, V> first = table.get(index);
2866
2867        // Look for an existing entry.
2868        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
2869          K entryKey = e.getKey();
2870          if (e.getHash() == hash && entryKey != null
2871              && map.keyEquivalence.equivalent(key, entryKey)) {
2872            // We found an existing entry.
2873
2874            ValueReference<K, V> valueReference = e.getValueReference();
2875            V entryValue = valueReference.get();
2876
2877            if (entryValue == null) {
2878              ++modCount;
2879              if (valueReference.isActive()) {
2880                enqueueNotification(key, hash, valueReference, RemovalCause.COLLECTED);
2881                setValue(e, key, value, now);
2882                newCount = this.count; // count remains unchanged
2883              } else {
2884                setValue(e, key, value, now);
2885                newCount = this.count + 1;
2886              }
2887              this.count = newCount; // write-volatile
2888              evictEntries();
2889              return null;
2890            } else if (onlyIfAbsent) {
2891              // Mimic
2892              // "if (!map.containsKey(key)) ...
2893              // else return map.get(key);
2894              recordLockedRead(e, now);
2895              return entryValue;
2896            } else {
2897              // clobber existing entry, count remains unchanged
2898              ++modCount;
2899              enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
2900              setValue(e, key, value, now);
2901              evictEntries();
2902              return entryValue;
2903            }
2904          }
2905        }
2906
2907        // Create a new entry.
2908        ++modCount;
2909        ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
2910        setValue(newEntry, key, value, now);
2911        table.set(index, newEntry);
2912        newCount = this.count + 1;
2913        this.count = newCount; // write-volatile
2914        evictEntries();
2915        return null;
2916      } finally {
2917        unlock();
2918        postWriteCleanup();
2919      }
2920    }
2921
2922    /**
2923     * Expands the table if possible.
2924     */
2925    @GuardedBy("Segment.this")
2926    void expand() {
2927      AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table;
2928      int oldCapacity = oldTable.length();
2929      if (oldCapacity >= MAXIMUM_CAPACITY) {
2930        return;
2931      }
2932
2933      /*
2934       * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the
2935       * elements from each bin must either stay at same index, or move with a power of two offset.
2936       * We eliminate unnecessary node creation by catching cases where old nodes can be reused
2937       * because their next fields won't change. Statistically, at the default threshold, only
2938       * about one-sixth of them need cloning when a table doubles. The nodes they replace will be
2939       * garbage collectable as soon as they are no longer referenced by any reader thread that may
2940       * be in the midst of traversing table right now.
2941       */
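
      /*
       * A minimal numeric sketch of the reclassification: doubling a 16-bucket table makes the new
       * index (hash & 31), so an entry currently at (hash & 15) either stays put or moves exactly
       * 16 slots up, depending only on bit 0x10 of its hash. Runs of trailing entries that agree
       * on that bit can therefore be reused as-is.
       */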
2942
2943      int newCount = count;
2944      AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1);
2945      threshold = newTable.length() * 3 / 4;
2946      int newMask = newTable.length() - 1;
2947      for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) {
2948        // We need to guarantee that any existing reads of old Map can
2949        // proceed. So we cannot yet null out each bin.
2950        ReferenceEntry<K, V> head = oldTable.get(oldIndex);
2951
2952        if (head != null) {
2953          ReferenceEntry<K, V> next = head.getNext();
2954          int headIndex = head.getHash() & newMask;
2955
2956          // Single node on list
2957          if (next == null) {
2958            newTable.set(headIndex, head);
2959          } else {
2960            // Reuse the consecutive sequence of nodes with the same target
2961            // index from the end of the list. tail points to the first
2962            // entry in the reusable list.
2963            ReferenceEntry<K, V> tail = head;
2964            int tailIndex = headIndex;
2965            for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) {
2966              int newIndex = e.getHash() & newMask;
2967              if (newIndex != tailIndex) {
2968                // The index changed. We'll need to copy the previous entry.
2969                tailIndex = newIndex;
2970                tail = e;
2971              }
2972            }
2973            newTable.set(tailIndex, tail);
2974
2975            // Clone nodes leading up to the tail.
2976            for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) {
2977              if (isCollected(e)) {
2978                removeCollectedEntry(e);
2979                newCount--;
2980              } else {
2981                int newIndex = e.getHash() & newMask;
2982                ReferenceEntry<K, V> newNext = newTable.get(newIndex);
2983                ReferenceEntry<K, V> newFirst = copyEntry(e, newNext);
2984                newTable.set(newIndex, newFirst);
2985              }
2986            }
2987          }
2988        }
2989      }
2990      table = newTable;
2991      this.count = newCount;
2992    }
2993
2994    boolean replace(K key, int hash, V oldValue, V newValue) {
2995      lock();
2996      try {
2997        long now = map.ticker.read();
2998        preWriteCleanup(now);
2999
3000        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3001        int index = hash & (table.length() - 1);
3002        ReferenceEntry<K, V> first = table.get(index);
3003
3004        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3005          K entryKey = e.getKey();
3006          if (e.getHash() == hash && entryKey != null
3007              && map.keyEquivalence.equivalent(key, entryKey)) {
3008            ValueReference<K, V> valueReference = e.getValueReference();
3009            V entryValue = valueReference.get();
3010            if (entryValue == null) {
3011              if (valueReference.isActive()) {
3012                // If the value disappeared, this entry is partially collected.
3013                int newCount = this.count - 1;
3014                ++modCount;
3015                ReferenceEntry<K, V> newFirst = removeValueFromChain(
3016                    first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3017                newCount = this.count - 1;
3018                table.set(index, newFirst);
3019                this.count = newCount; // write-volatile
3020              }
3021              return false;
3022            }
3023
3024            if (map.valueEquivalence.equivalent(oldValue, entryValue)) {
3025              ++modCount;
3026              enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3027              setValue(e, key, newValue, now);
3028              evictEntries();
3029              return true;
3030            } else {
3031              // Mimic
3032              // "if (map.containsKey(key) && map.get(key).equals(oldValue))..."
3033              recordLockedRead(e, now);
3034              return false;
3035            }
3036          }
3037        }
3038
3039        return false;
3040      } finally {
3041        unlock();
3042        postWriteCleanup();
3043      }
3044    }
3045
3046    @Nullable
3047    V replace(K key, int hash, V newValue) {
3048      lock();
3049      try {
3050        long now = map.ticker.read();
3051        preWriteCleanup(now);
3052
3053        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3054        int index = hash & (table.length() - 1);
3055        ReferenceEntry<K, V> first = table.get(index);
3056
3057        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3058          K entryKey = e.getKey();
3059          if (e.getHash() == hash && entryKey != null
3060              && map.keyEquivalence.equivalent(key, entryKey)) {
3061            ValueReference<K, V> valueReference = e.getValueReference();
3062            V entryValue = valueReference.get();
3063            if (entryValue == null) {
3064              if (valueReference.isActive()) {
3065                // If the value disappeared, this entry is partially collected.
3066                int newCount = this.count - 1;
3067                ++modCount;
3068                ReferenceEntry<K, V> newFirst = removeValueFromChain(
3069                    first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3070                newCount = this.count - 1;
3071                table.set(index, newFirst);
3072                this.count = newCount; // write-volatile
3073              }
3074              return null;
3075            }
3076
3077            ++modCount;
3078            enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3079            setValue(e, key, newValue, now);
3080            evictEntries();
3081            return entryValue;
3082          }
3083        }
3084
3085        return null;
3086      } finally {
3087        unlock();
3088        postWriteCleanup();
3089      }
3090    }
3091
3092    @Nullable
3093    V remove(Object key, int hash) {
3094      lock();
3095      try {
3096        long now = map.ticker.read();
3097        preWriteCleanup(now);
3098
3099        int newCount = this.count - 1;
3100        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3101        int index = hash & (table.length() - 1);
3102        ReferenceEntry<K, V> first = table.get(index);
3103
3104        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3105          K entryKey = e.getKey();
3106          if (e.getHash() == hash && entryKey != null
3107              && map.keyEquivalence.equivalent(key, entryKey)) {
3108            ValueReference<K, V> valueReference = e.getValueReference();
3109            V entryValue = valueReference.get();
3110
3111            RemovalCause cause;
3112            if (entryValue != null) {
3113              cause = RemovalCause.EXPLICIT;
3114            } else if (valueReference.isActive()) {
3115              cause = RemovalCause.COLLECTED;
3116            } else {
3117              // currently loading
3118              return null;
3119            }
3120
3121            ++modCount;
3122            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3123                first, e, entryKey, hash, valueReference, cause);
3124            newCount = this.count - 1;
3125            table.set(index, newFirst);
3126            this.count = newCount; // write-volatile
3127            return entryValue;
3128          }
3129        }
3130
3131        return null;
3132      } finally {
3133        unlock();
3134        postWriteCleanup();
3135      }
3136    }
3137
3138    boolean storeLoadedValue(K key, int hash, LoadingValueReference<K, V> oldValueReference,
3139        V newValue) {
3140      lock();
3141      try {
3142        long now = map.ticker.read();
3143        preWriteCleanup(now);
3144
3145        int newCount = this.count + 1;
3146        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3147        int index = hash & (table.length() - 1);
3148        ReferenceEntry<K, V> first = table.get(index);
3149
3150        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3151          K entryKey = e.getKey();
3152          if (e.getHash() == hash && entryKey != null
3153              && map.keyEquivalence.equivalent(key, entryKey)) {
3154            ValueReference<K, V> valueReference = e.getValueReference();
3155            V entryValue = valueReference.get();
3156            if (entryValue == null || oldValueReference == valueReference) {
3157              ++modCount;
3158              if (oldValueReference.isActive()) {
3159                RemovalCause cause =
3160                    (entryValue == null) ? RemovalCause.COLLECTED : RemovalCause.REPLACED;
3161                enqueueNotification(key, hash, oldValueReference, cause);
3162                newCount--;
3163              }
3164              setValue(e, key, newValue, now);
3165              this.count = newCount; // write-volatile
3166              evictEntries();
3167              return true;
3168            }
3169
3170            // the loaded value was already clobbered
3171            valueReference = new WeightedStrongValueReference<K, V>(newValue, 0);
3172            enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3173            return false;
3174          }
3175        }
3176
3177        ++modCount;
3178        ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
3179        setValue(newEntry, key, newValue, now);
3180        table.set(index, newEntry);
3181        this.count = newCount; // write-volatile
3182        evictEntries();
3183        return true;
3184      } finally {
3185        unlock();
3186        postWriteCleanup();
3187      }
3188    }
3189
3190    boolean remove(Object key, int hash, Object value) {
3191      lock();
3192      try {
3193        long now = map.ticker.read();
3194        preWriteCleanup(now);
3195
3196        int newCount = this.count - 1;
3197        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3198        int index = hash & (table.length() - 1);
3199        ReferenceEntry<K, V> first = table.get(index);
3200
3201        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3202          K entryKey = e.getKey();
3203          if (e.getHash() == hash && entryKey != null
3204              && map.keyEquivalence.equivalent(key, entryKey)) {
3205            ValueReference<K, V> valueReference = e.getValueReference();
3206            V entryValue = valueReference.get();
3207
3208            RemovalCause cause;
3209            if (map.valueEquivalence.equivalent(value, entryValue)) {
3210              cause = RemovalCause.EXPLICIT;
3211            } else if (entryValue == null && valueReference.isActive()) {
3212              cause = RemovalCause.COLLECTED;
3213            } else {
3214              // currently loading
3215              return false;
3216            }
3217
3218            ++modCount;
3219            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3220                first, e, entryKey, hash, valueReference, cause);
3221            newCount = this.count - 1;
3222            table.set(index, newFirst);
3223            this.count = newCount; // write-volatile
3224            return (cause == RemovalCause.EXPLICIT);
3225          }
3226        }
3227
3228        return false;
3229      } finally {
3230        unlock();
3231        postWriteCleanup();
3232      }
3233    }
3234
3235    void clear() {
3236      if (count != 0) { // read-volatile
3237        lock();
3238        try {
3239          AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3240          for (int i = 0; i < table.length(); ++i) {
3241            for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
3242              // Loading references aren't actually in the map yet.
3243              if (e.getValueReference().isActive()) {
3244                enqueueNotification(e, RemovalCause.EXPLICIT);
3245              }
3246            }
3247          }
3248          for (int i = 0; i < table.length(); ++i) {
3249            table.set(i, null);
3250          }
3251          clearReferenceQueues();
3252          writeQueue.clear();
3253          accessQueue.clear();
3254          readCount.set(0);
3255
3256          ++modCount;
3257          count = 0; // write-volatile
3258        } finally {
3259          unlock();
3260          postWriteCleanup();
3261        }
3262      }
3263    }
3264
3265    @GuardedBy("Segment.this")
3266    @Nullable
3267    ReferenceEntry<K, V> removeValueFromChain(ReferenceEntry<K, V> first,
3268        ReferenceEntry<K, V> entry, @Nullable K key, int hash, ValueReference<K, V> valueReference,
3269        RemovalCause cause) {
3270      enqueueNotification(key, hash, valueReference, cause);
3271      writeQueue.remove(entry);
3272      accessQueue.remove(entry);
3273
3274      if (valueReference.isLoading()) {
3275        valueReference.notifyNewValue(null);
3276        return first;
3277      } else {
3278        return removeEntryFromChain(first, entry);
3279      }
3280    }
3281
3282    @GuardedBy("Segment.this")
3283    @Nullable
3284    ReferenceEntry<K, V> removeEntryFromChain(ReferenceEntry<K, V> first,
3285        ReferenceEntry<K, V> entry) {
3286      int newCount = count;
3287      ReferenceEntry<K, V> newFirst = entry.getNext();
3288      for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) {
3289        if (isCollected(e)) {
3290          removeCollectedEntry(e);
3291          newCount--;
3292        } else {
3293          newFirst = copyEntry(e, newFirst);
3294        }
3295      }
3296      this.count = newCount;
3297      return newFirst;
3298    }
3299
3300    @GuardedBy("Segment.this")
3301    void removeCollectedEntry(ReferenceEntry<K, V> entry) {
3302      enqueueNotification(entry, RemovalCause.COLLECTED);
3303      writeQueue.remove(entry);
3304      accessQueue.remove(entry);
3305    }
3306
3307    /**
3308     * Removes an entry whose key has been garbage collected.
3309     */
3310    boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) {
3311      lock();
3312      try {
3313        int newCount = count - 1;
3314        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3315        int index = hash & (table.length() - 1);
3316        ReferenceEntry<K, V> first = table.get(index);
3317
3318        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3319          if (e == entry) {
3320            ++modCount;
3321            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3322                first, e, e.getKey(), hash, e.getValueReference(), RemovalCause.COLLECTED);
3323            newCount = this.count - 1;
3324            table.set(index, newFirst);
3325            this.count = newCount; // write-volatile
3326            return true;
3327          }
3328        }
3329
3330        return false;
3331      } finally {
3332        unlock();
3333        postWriteCleanup();
3334      }
3335    }
3336
3337    /**
3338     * Removes an entry whose value has been garbage collected.
3339     */
3340    boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) {
3341      lock();
3342      try {
3343        int newCount = this.count - 1;
3344        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3345        int index = hash & (table.length() - 1);
3346        ReferenceEntry<K, V> first = table.get(index);
3347
3348        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3349          K entryKey = e.getKey();
3350          if (e.getHash() == hash && entryKey != null
3351              && map.keyEquivalence.equivalent(key, entryKey)) {
3352            ValueReference<K, V> v = e.getValueReference();
3353            if (v == valueReference) {
3354              ++modCount;
3355              ReferenceEntry<K, V> newFirst = removeValueFromChain(
3356                  first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3357              newCount = this.count - 1;
3358              table.set(index, newFirst);
3359              this.count = newCount; // write-volatile
3360              return true;
3361            }
3362            return false;
3363          }
3364        }
3365
3366        return false;
3367      } finally {
3368        unlock();
3369        if (!isHeldByCurrentThread()) { // don't clean up when called from within a write such as put
3370          postWriteCleanup();
3371        }
3372      }
3373    }
3374
3375    boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) {
3376      lock();
3377      try {
3378        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3379        int index = hash & (table.length() - 1);
3380        ReferenceEntry<K, V> first = table.get(index);
3381
3382        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3383          K entryKey = e.getKey();
3384          if (e.getHash() == hash && entryKey != null
3385              && map.keyEquivalence.equivalent(key, entryKey)) {
3386            ValueReference<K, V> v = e.getValueReference();
3387            if (v == valueReference) {
3388              if (valueReference.isActive()) {
3389                e.setValueReference(valueReference.getOldValue());
3390              } else {
3391                ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e);
3392                table.set(index, newFirst);
3393              }
3394              return true;
3395            }
3396            return false;
3397          }
3398        }
3399
3400        return false;
3401      } finally {
3402        unlock();
3403        postWriteCleanup();
3404      }
3405    }
3406
3407    @GuardedBy("Segment.this")
3408    boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) {
3409      int newCount = this.count - 1;
3410      AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3411      int index = hash & (table.length() - 1);
3412      ReferenceEntry<K, V> first = table.get(index);
3413
3414      for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3415        if (e == entry) {
3416          ++modCount;
3417          ReferenceEntry<K, V> newFirst = removeValueFromChain(
3418              first, e, e.getKey(), hash, e.getValueReference(), cause);
3419          newCount = this.count - 1;
3420          table.set(index, newFirst);
3421          this.count = newCount; // write-volatile
3422          return true;
3423        }
3424      }
3425
3426      return false;
3427    }
3428
3429    /**
3430     * Returns true if the entry has been partially collected, meaning that either the key is null,
3431     * or the value is active but null.
3432     */
3433    boolean isCollected(ReferenceEntry<K, V> entry) {
3434      if (entry.getKey() == null) {
3435        return true;
3436      }
3437      ValueReference<K, V> valueReference = entry.getValueReference();
3438      return (valueReference.get() == null) && valueReference.isActive();
3439    }
3440
3441    /**
3442     * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
3443     * is not observed after a sufficient number of reads, try cleaning up from the read thread.
3444     */
3445    void postReadCleanup() {
3446      if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
3447        cleanUp();
3448      }
3449    }
3450
3451    /**
3452     * Performs routine cleanup prior to executing a write. This should be called every time a
3453     * write thread acquires the segment lock, immediately after acquiring the lock.
3454     *
3455     * <p>Post-condition: expireEntries has been run.
3456     */
3457    @GuardedBy("Segment.this")
3458    void preWriteCleanup(long now) {
3459      runLockedCleanup(now);
3460    }
3461
3462    /**
3463     * Performs routine cleanup following a write.
3464     */
3465    void postWriteCleanup() {
3466      runUnlockedCleanup();
3467    }
3468
3469    void cleanUp() {
3470      long now = map.ticker.read();
3471      runLockedCleanup(now);
3472      runUnlockedCleanup();
3473    }
3474
3475    void runLockedCleanup(long now) {
3476      if (tryLock()) {
3477        try {
3478          drainReferenceQueues();
3479          expireEntries(now); // calls drainRecencyQueue
3480          readCount.set(0);
3481        } finally {
3482          unlock();
3483        }
3484      }
3485    }
3486
3487    void runUnlockedCleanup() {
3488      // locked cleanup may generate notifications we can send unlocked
3489      if (!isHeldByCurrentThread()) {
3490        map.processPendingNotifications();
3491      }
3492    }
3493
3494  }
3495
3496  static class LoadingValueReference<K, V> implements ValueReference<K, V> {
3497    volatile ValueReference<K, V> oldValue;
3498
3499    // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture
3500    final SettableFuture<V> futureValue = SettableFuture.create();
3501    final Stopwatch stopwatch = new Stopwatch();
3502
3503    public LoadingValueReference() {
3504      this(LocalCache.<K, V>unset());
3505    }
3506
3507    public LoadingValueReference(ValueReference<K, V> oldValue) {
3508      this.oldValue = oldValue;
3509    }
3510
3511    @Override
3512    public boolean isLoading() {
3513      return true;
3514    }
3515
3516    @Override
3517    public boolean isActive() {
3518      return oldValue.isActive();
3519    }
3520
3521    @Override
3522    public int getWeight() {
3523      return oldValue.getWeight();
3524    }
3525
3526    public boolean set(@Nullable V newValue) {
3527      return futureValue.set(newValue);
3528    }
3529
3530    public boolean setException(Throwable t) {
3531      return setException(futureValue, t);
3532    }
3533
3534    private static boolean setException(SettableFuture<?> future, Throwable t) {
3535      try {
3536        return future.setException(t);
3537      } catch (Error e) {
3538        // the error will already be propagated by the loading thread
3539        return false;
3540      }
3541    }
3542
3543    private ListenableFuture<V> fullyFailedFuture(Throwable t) {
3544      SettableFuture<V> future = SettableFuture.create();
3545      setException(future, t);
3546      return future;
3547    }
3548
3549    @Override
3550    public void notifyNewValue(@Nullable V newValue) {
3551      if (newValue != null) {
3552        // The pending load was clobbered by a manual write.
3553        // Unblock all pending gets, and have them return the new value.
3554        set(newValue);
3555      } else {
3556        // The pending load was removed. Delay notifications until loading completes.
3557        oldValue = unset();
3558      }
3559
3560      // TODO(fry): could also cancel loading if we had a handle on its future
3561    }
3562
3563    public ListenableFuture<V> loadFuture(K key, CacheLoader<? super K, V> loader) {
3564      stopwatch.start();
3565      V previousValue = oldValue.get();
3566      try {
3567        if (previousValue == null) {
3568          V newValue = loader.load(key);
3569          return set(newValue) ? futureValue : Futures.immediateFuture(newValue);
3570        } else {
3571          ListenableFuture<V> newValue = loader.reload(key, previousValue);
3572          // rely on loadAsync to call set in order to avoid adding a second listener here
3573          return newValue != null ? newValue : Futures.<V>immediateFuture(null);
3574        }
3575      } catch (Throwable t) {
3576        return setException(t) ? futureValue : fullyFailedFuture(t);
3577      }
3578    }
3579
3580    public long elapsedNanos() {
3581      return stopwatch.elapsedTime(NANOSECONDS);
3582    }
3583
3584    @Override
3585    public V waitForValue() throws ExecutionException {
3586      return getUninterruptibly(futureValue);
3587    }
3588
3589    @Override
3590    public V get() {
3591      return oldValue.get();
3592    }
3593
3594    public ValueReference<K, V> getOldValue() {
3595      return oldValue;
3596    }
3597
3598    @Override
3599    public ReferenceEntry<K, V> getEntry() {
3600      return null;
3601    }
3602
3603    @Override
3604    public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) {
3605      return this;
3606    }
3607  }
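
  /*
   * Illustrative sketch (not part of this class): loadFuture above calls CacheLoader.reload when a
   * previous value exists, so a refresh can be made asynchronous by overriding reload to hand the
   * work to an executor. Here expensiveLookup and backgroundExecutor are assumed
   * application-provided names.
   *
   *   CacheLoader<String, String> loader = new CacheLoader<String, String>() {
   *     @Override public String load(String key) throws Exception {
   *       return expensiveLookup(key);                 // blocking load on a cache miss
   *     }
   *     @Override public ListenableFuture<String> reload(final String key, String oldValue) {
   *       ListenableFutureTask<String> task = ListenableFutureTask.create(new Callable<String>() {
   *         @Override public String call() throws Exception {
   *           return expensiveLookup(key);             // recomputed in the background
   *         }
   *       });
   *       backgroundExecutor.execute(task);
   *       return task;                                 // old value is served until this completes
   *     }
   *   };
   */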
3608
3609  // Queues
3610
3611  /**
3612   * A custom queue for managing eviction order. Note that this is tightly integrated with {@code
3613   * ReferenceEntry}, upon which it relies to perform its linking.
3614   *
3615   * <p>Note that this entire implementation makes the assumption that all elements which are in
3616   * the map are also in this queue, and that all elements not in the queue are not in the map.
3617   *
3618   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3619   * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3620   * for the current model.
3621   */
3622  static final class WriteQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3623    final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3624
3625      @Override
3626      public long getWriteTime() {
3627        return Long.MAX_VALUE;
3628      }
3629
3630      @Override
3631      public void setWriteTime(long time) {}
3632
3633      ReferenceEntry<K, V> nextWrite = this;
3634
3635      @Override
3636      public ReferenceEntry<K, V> getNextInWriteQueue() {
3637        return nextWrite;
3638      }
3639
3640      @Override
3641      public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
3642        this.nextWrite = next;
3643      }
3644
3645      ReferenceEntry<K, V> previousWrite = this;
3646
3647      @Override
3648      public ReferenceEntry<K, V> getPreviousInWriteQueue() {
3649        return previousWrite;
3650      }
3651
3652      @Override
3653      public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
3654        this.previousWrite = previous;
3655      }
3656    };
3657
3658    // implements Queue
3659
3660    @Override
3661    public boolean offer(ReferenceEntry<K, V> entry) {
3662      // unlink
3663      connectWriteOrder(entry.getPreviousInWriteQueue(), entry.getNextInWriteQueue());
3664
3665      // add to tail
3666      connectWriteOrder(head.getPreviousInWriteQueue(), entry);
3667      connectWriteOrder(entry, head);
3668
3669      return true;
3670    }
3671
3672    @Override
3673    public ReferenceEntry<K, V> peek() {
3674      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3675      return (next == head) ? null : next;
3676    }
3677
3678    @Override
3679    public ReferenceEntry<K, V> poll() {
3680      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3681      if (next == head) {
3682        return null;
3683      }
3684
3685      remove(next);
3686      return next;
3687    }
3688
3689    @Override
3690    @SuppressWarnings("unchecked")
3691    public boolean remove(Object o) {
3692      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3693      ReferenceEntry<K, V> previous = e.getPreviousInWriteQueue();
3694      ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3695      connectWriteOrder(previous, next);
3696      nullifyWriteOrder(e);
3697
3698      return next != NullEntry.INSTANCE;
3699    }
3700
3701    @Override
3702    @SuppressWarnings("unchecked")
3703    public boolean contains(Object o) {
3704      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3705      return e.getNextInWriteQueue() != NullEntry.INSTANCE;
3706    }
3707
3708    @Override
3709    public boolean isEmpty() {
3710      return head.getNextInWriteQueue() == head;
3711    }
3712
3713    @Override
3714    public int size() {
3715      int size = 0;
3716      for (ReferenceEntry<K, V> e = head.getNextInWriteQueue(); e != head;
3717          e = e.getNextInWriteQueue()) {
3718        size++;
3719      }
3720      return size;
3721    }
3722
3723    @Override
3724    public void clear() {
3725      ReferenceEntry<K, V> e = head.getNextInWriteQueue();
3726      while (e != head) {
3727        ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3728        nullifyWriteOrder(e);
3729        e = next;
3730      }
3731
3732      head.setNextInWriteQueue(head);
3733      head.setPreviousInWriteQueue(head);
3734    }
3735
3736    @Override
3737    public Iterator<ReferenceEntry<K, V>> iterator() {
3738      return new AbstractLinkedIterator<ReferenceEntry<K, V>>(peek()) {
3739        @Override
3740        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3741          ReferenceEntry<K, V> next = previous.getNextInWriteQueue();
3742          return (next == head) ? null : next;
3743        }
3744      };
3745    }
3746  }
3747
3748  /**
3749   * A custom queue for managing access order. Note that this is tightly integrated with
3750   * {@code ReferenceEntry}, upon which it relies to perform its linking.
3751   *
3752   * <p>Note that this entire implementation makes the assumption that all elements which are in
3753   * the map are also in this queue, and that all elements not in the queue are not in the map.
3754   *
3755   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3756   * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3757   * for the current model.
3758   */
3759  static final class AccessQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3760    final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3761
3762      @Override
3763      public long getAccessTime() {
3764        return Long.MAX_VALUE;
3765      }
3766
3767      @Override
3768      public void setAccessTime(long time) {}
3769
3770      ReferenceEntry<K, V> nextAccess = this;
3771
3772      @Override
3773      public ReferenceEntry<K, V> getNextInAccessQueue() {
3774        return nextAccess;
3775      }
3776
3777      @Override
3778      public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
3779        this.nextAccess = next;
3780      }
3781
3782      ReferenceEntry<K, V> previousAccess = this;
3783
3784      @Override
3785      public ReferenceEntry<K, V> getPreviousInAccessQueue() {
3786        return previousAccess;
3787      }
3788
3789      @Override
3790      public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
3791        this.previousAccess = previous;
3792      }
3793    };
3794
3795    // implements Queue
3796
3797    @Override
3798    public boolean offer(ReferenceEntry<K, V> entry) {
3799      // unlink
3800      connectAccessOrder(entry.getPreviousInAccessQueue(), entry.getNextInAccessQueue());
3801
3802      // add to tail
3803      connectAccessOrder(head.getPreviousInAccessQueue(), entry);
3804      connectAccessOrder(entry, head);
3805
3806      return true;
3807    }
3808
3809    @Override
3810    public ReferenceEntry<K, V> peek() {
3811      ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3812      return (next == head) ? null : next;
3813    }
3814
3815    @Override
3816    public ReferenceEntry<K, V> poll() {
3817      ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3818      if (next == head) {
3819        return null;
3820      }
3821
3822      remove(next);
3823      return next;
3824    }
3825
3826    @Override
3827    @SuppressWarnings("unchecked")
3828    public boolean remove(Object o) {
3829      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3830      ReferenceEntry<K, V> previous = e.getPreviousInAccessQueue();
3831      ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3832      connectAccessOrder(previous, next);
3833      nullifyAccessOrder(e);
3834
3835      return next != NullEntry.INSTANCE;
3836    }
3837
3838    @Override
3839    @SuppressWarnings("unchecked")
3840    public boolean contains(Object o) {
3841      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3842      return e.getNextInAccessQueue() != NullEntry.INSTANCE;
3843    }
3844
3845    @Override
3846    public boolean isEmpty() {
3847      return head.getNextInAccessQueue() == head;
3848    }
3849
3850    @Override
3851    public int size() {
3852      int size = 0;
3853      for (ReferenceEntry<K, V> e = head.getNextInAccessQueue(); e != head;
3854          e = e.getNextInAccessQueue()) {
3855        size++;
3856      }
3857      return size;
3858    }
3859
3860    @Override
3861    public void clear() {
3862      ReferenceEntry<K, V> e = head.getNextInAccessQueue();
3863      while (e != head) {
3864        ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3865        nullifyAccessOrder(e);
3866        e = next;
3867      }
3868
3869      head.setNextInAccessQueue(head);
3870      head.setPreviousInAccessQueue(head);
3871    }
3872
3873    @Override
3874    public Iterator<ReferenceEntry<K, V>> iterator() {
3875      return new AbstractLinkedIterator<ReferenceEntry<K, V>>(peek()) {
3876        @Override
3877        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3878          ReferenceEntry<K, V> next = previous.getNextInAccessQueue();
3879          return (next == head) ? null : next;
3880        }
3881      };
3882    }
3883  }
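
  /*
   * Note (illustrative): WriteQueue above orders entries by write time and backs expireAfterWrite,
   * while AccessQueue orders them by access time and backs expireAfterAccess as well as the
   * least-recently-used order consulted by size-based eviction. A minimal configuration sketch;
   * the bound, durations and value type are placeholders:
   *
   *   Cache<String, byte[]> cache = CacheBuilder.newBuilder()
   *       .maximumSize(10000)
   *       .expireAfterWrite(30, TimeUnit.MINUTES)
   *       .expireAfterAccess(10, TimeUnit.MINUTES)
   *       .build();
   */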
3884
3885  // Cache support
3886
3887  public void cleanUp() {
3888    for (Segment<?, ?> segment : segments) {
3889      segment.cleanUp();
3890    }
3891  }
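
  /*
   * Note (illustrative): cleanup otherwise piggybacks on reads and writes, so a cache that is
   * rarely touched can retain expired or collected entries until cleanUp is called explicitly.
   * A hedged sketch, assuming an application-owned scheduler and a previously built cache:
   *
   *   ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
   *   scheduler.scheduleWithFixedDelay(new Runnable() {
   *     @Override public void run() {
   *       cache.cleanUp();   // drains reference queues and expires stale entries
   *     }
   *   }, 1, 1, TimeUnit.MINUTES);
   */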
3892
3893  // ConcurrentMap methods
3894
3895  @Override
3896  public boolean isEmpty() {
3897    /*
3898     * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
3899     * removed in one segment while checking another, in which case the table was never actually
3900     * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
3901     * modifications before recheck.)  Method containsValue() uses similar constructions for
3902     * stability checks.
3903     */
3904    long sum = 0L;
3905    Segment<K, V>[] segments = this.segments;
3906    for (int i = 0; i < segments.length; ++i) {
3907      if (segments[i].count != 0) {
3908        return false;
3909      }
3910      sum += segments[i].modCount;
3911    }
3912
3913    if (sum != 0L) { // recheck unless no modifications
3914      for (int i = 0; i < segments.length; ++i) {
3915        if (segments[i].count != 0) {
3916          return false;
3917        }
3918        sum -= segments[i].modCount;
3919      }
3920      if (sum != 0L) {
3921        return false;
3922      }
3923    }
3924    return true;
3925  }
3926
3927  long longSize() {
3928    Segment<K, V>[] segments = this.segments;
3929    long sum = 0;
3930    for (int i = 0; i < segments.length; ++i) {
3931      sum += segments[i].count;
3932    }
3933    return sum;
3934  }
3935
3936  @Override
3937  public int size() {
3938    return Ints.saturatedCast(longSize());
3939  }
3940
3941  @Override
3942  @Nullable
3943  public V get(@Nullable Object key) {
3944    if (key == null) {
3945      return null;
3946    }
3947    int hash = hash(key);
3948    return segmentFor(hash).get(key, hash);
3949  }
3950
3951  @Nullable
3952  public V getIfPresent(Object key) {
3953    int hash = hash(checkNotNull(key));
3954    V value = segmentFor(hash).get(key, hash);
3955    if (value == null) {
3956      globalStatsCounter.recordMisses(1);
3957    } else {
3958      globalStatsCounter.recordHits(1);
3959    }
3960    return value;
3961  }
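
  /*
   * Usage sketch (illustrative): getIfPresent never triggers a load, but it does record a hit or
   * miss as shown above. A caller doing manual population might do the following, where
   * openConnection is a hypothetical factory:
   *
   *   Cache<String, Connection> connections = CacheBuilder.newBuilder().maximumSize(100).build();
   *   Connection c = connections.getIfPresent("db1");
   *   if (c == null) {
   *     c = openConnection("db1");
   *     connections.put("db1", c);
   *   }
   */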
3962
3963  V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException {
3964    int hash = hash(checkNotNull(key));
3965    return segmentFor(hash).get(key, hash, loader);
3966  }
3967
3968  V getOrLoad(K key) throws ExecutionException {
3969    return get(key, defaultLoader);
3970  }
3971
3972  ImmutableMap<K, V> getAllPresent(Iterable<? extends K> keys) {
3973    int hits = 0;
3974    int misses = 0;
3975
3976    Map<K, V> result = Maps.newLinkedHashMap();
3977    for (K key : keys) {
3978      V value = get(key);
3979      if (value == null) {
3980        misses++;
3981      } else {
3982        result.put(key, value);
3983        hits++;
3984      }
3985    }
3986    globalStatsCounter.recordHits(hits);
3987    globalStatsCounter.recordMisses(misses);
3988    return ImmutableMap.copyOf(result);
3989  }
3990
3991  ImmutableMap<K, V> getAll(Iterable<? extends K> keys)
3992      throws ExecutionException {
3993    int hits = 0;
3994    int misses = 0;
3995
3996    Map<K, V> result = Maps.newLinkedHashMap();
3997    Set<K> keysToLoad = Sets.newLinkedHashSet();
3998    for (K key : keys) {
3999      V value = get(key);
4000      if (!result.containsKey(key)) {
4001        result.put(key, value);
4002        if (value == null) {
4003          misses++;
4004          keysToLoad.add(key);
4005        } else {
4006          hits++;
4007        }
4008      }
4009    }
4010
4011    try {
4012      if (!keysToLoad.isEmpty()) {
4013        try {
4014          Map<K, V> newEntries = loadAll(keysToLoad, defaultLoader);
4015          for (K key : keysToLoad) {
4016            V value = newEntries.get(key);
4017            if (value == null) {
4018              throw new InvalidCacheLoadException("loadAll failed to return a value for " + key);
4019            }
4020            result.put(key, value);
4021          }
4022        } catch (UnsupportedLoadingOperationException e) {
4023          // loadAll not implemented, fall back to load
4024          for (K key : keysToLoad) {
4025            misses--; // get will count this miss
4026            result.put(key, get(key, defaultLoader));
4027          }
4028        }
4029      }
4030      return ImmutableMap.copyOf(result);
4031    } finally {
4032      globalStatsCounter.recordHits(hits);
4033      globalStatsCounter.recordMisses(misses);
4034    }
4035  }
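
  /*
   * Usage sketch (illustrative): getAll returns an entry for every requested key, loading missing
   * ones through the loader (in one call when loadAll is implemented, otherwise one at a time via
   * the fallback above). Callers must handle ExecutionException; User and userLoader are
   * placeholders.
   *
   *   LoadingCache<String, User> users = CacheBuilder.newBuilder().build(userLoader);
   *   ImmutableMap<String, User> batch = users.getAll(ImmutableList.of("alice", "bob", "carol"));
   */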
4036
4037  /**
4038   * Returns the result of calling {@link CacheLoader#loadAll}; rethrows
4039   * {@code UnsupportedLoadingOperationException} if {@code loader} doesn't implement {@code loadAll}.
4040   */
4041  @Nullable
4042  Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
4043      throws ExecutionException {
4044    Stopwatch stopwatch = new Stopwatch().start();
4045    Map<K, V> result;
4046    boolean success = false;
4047    try {
4048      @SuppressWarnings("unchecked") // safe since all keys extend K
4049      Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
4050      result = map;
4051      success = true;
4052    } catch (UnsupportedLoadingOperationException e) {
4053      success = true;
4054      throw e;
4055    } catch (RuntimeException e) {
4056      throw new UncheckedExecutionException(e);
4057    } catch (Exception e) {
4058      throw new ExecutionException(e);
4059    } catch (Error e) {
4060      throw new ExecutionError(e);
4061    } finally {
4062      if (!success) {
4063        globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
4064      }
4065    }
4066
4067    if (result == null) {
4068      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
4069      throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
4070    }
4071
4072    stopwatch.stop();
4073    // TODO(fry): batch by segment
4074    boolean nullsPresent = false;
4075    for (Map.Entry<K, V> entry : result.entrySet()) {
4076      K key = entry.getKey();
4077      V value = entry.getValue();
4078      if (key == null || value == null) {
4079        // delay failure until non-null entries are stored
4080        nullsPresent = true;
4081      } else {
4082        put(key, value);
4083      }
4084    }
4085
4086    if (nullsPresent) {
4087      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
4088      throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
4089    }
4090
4091    // TODO(fry): record count of loaded entries
4092    globalStatsCounter.recordLoadSuccess(stopwatch.elapsedTime(NANOSECONDS));
4093    return result;
4094  }
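
  /*
   * Illustrative sketch (not part of this class): a loader enables the bulk path above by
   * overriding loadAll, and must return a non-null value for every requested key or the code above
   * throws InvalidCacheLoadException. fetchFromDatabase is a hypothetical bulk query.
   *
   *   CacheLoader<String, User> userLoader = new CacheLoader<String, User>() {
   *     @Override public User load(String key) throws Exception {
   *       return loadAll(ImmutableSet.of(key)).get(key);
   *     }
   *     @Override public Map<String, User> loadAll(Iterable<? extends String> keys) throws Exception {
   *       return fetchFromDatabase(keys);   // one round trip for the whole batch
   *     }
   *   };
   */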
4095
4096  /**
4097   * Returns the internal entry for the specified key. The entry may be loading, expired, or
4098   * partially collected.
4099   */
4100  ReferenceEntry<K, V> getEntry(@Nullable Object key) {
4101    // does not impact recency ordering
4102    if (key == null) {
4103      return null;
4104    }
4105    int hash = hash(key);
4106    return segmentFor(hash).getEntry(key, hash);
4107  }
4108
4109  /**
4110   * Returns the live internal entry for the specified key.
4111   */
4112  ReferenceEntry<K, V> getLiveEntry(@Nullable Object key) {
4113    // does not impact recency ordering
4114    if (key == null) {
4115      return null;
4116    }
4117    int hash = hash(key);
4118    return segmentFor(hash).getLiveEntry(key, hash, ticker.read());
4119  }
4120
4121  void refresh(K key) {
4122    int hash = hash(checkNotNull(key));
4123    segmentFor(hash).refresh(key, hash, defaultLoader);
4124  }
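
  /*
   * Usage sketch (illustrative): refresh reloads the value for a key while readers keep seeing the
   * old value until the new one arrives; CacheBuilder.refreshAfterWrite triggers the same path
   * automatically on the first read of a stale entry. The duration, Config type and configLoader
   * are placeholders.
   *
   *   LoadingCache<String, Config> configs = CacheBuilder.newBuilder()
   *       .refreshAfterWrite(1, TimeUnit.MINUTES)
   *       .build(configLoader);
   *   configs.refresh("service-a");   // asynchronous only if the loader overrides reload
   */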
4125
4126  @Override
4127  public boolean containsKey(@Nullable Object key) {
4128    // does not impact recency ordering
4129    if (key == null) {
4130      return false;
4131    }
4132    int hash = hash(key);
4133    return segmentFor(hash).containsKey(key, hash);
4134  }
4135
4136  @Override
4137  public boolean containsValue(@Nullable Object value) {
4138    // does not impact recency ordering
4139    if (value == null) {
4140      return false;
4141    }
4142
4143    // This implementation is patterned after ConcurrentHashMap, but without the locking. The only
4144    // way for it to return a false negative would be for the target value to jump around in the map
4145    // such that none of the subsequent iterations observed it, despite the fact that at every point
4146    // in time it was present somewhere in the map. This becomes increasingly unlikely as
4147    // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible.
4148    long now = ticker.read();
4149    final Segment<K,V>[] segments = this.segments;
4150    long last = -1L;
4151    for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) {
4152      long sum = 0L;
4153      for (Segment<K, V> segment : segments) {
4154        // ensure visibility of most recent completed write
4155        @SuppressWarnings({"UnusedDeclaration", "unused"})
4156        int c = segment.count; // read-volatile
4157
4158        AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table;
4159        for (int j = 0; j < table.length(); j++) {
4160          for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) {
4161            V v = segment.getLiveValue(e, now);
4162            if (v != null && valueEquivalence.equivalent(value, v)) {
4163              return true;
4164            }
4165          }
4166        }
4167        sum += segment.modCount;
4168      }
4169      if (sum == last) {
4170        break;
4171      }
4172      last = sum;
4173    }
4174    return false;
4175  }
4176
4177  @Override
4178  public V put(K key, V value) {
4179    checkNotNull(key);
4180    checkNotNull(value);
4181    int hash = hash(key);
4182    return segmentFor(hash).put(key, hash, value, false);
4183  }
4184
4185  @Override
4186  public V putIfAbsent(K key, V value) {
4187    checkNotNull(key);
4188    checkNotNull(value);
4189    int hash = hash(key);
4190    return segmentFor(hash).put(key, hash, value, true);
4191  }
4192
4193  @Override
4194  public void putAll(Map<? extends K, ? extends V> m) {
4195    for (Entry<? extends K, ? extends V> e : m.entrySet()) {
4196      put(e.getKey(), e.getValue());
4197    }
4198  }
4199
4200  @Override
4201  public V remove(@Nullable Object key) {
4202    if (key == null) {
4203      return null;
4204    }
4205    int hash = hash(key);
4206    return segmentFor(hash).remove(key, hash);
4207  }
4208
4209  @Override
4210  public boolean remove(@Nullable Object key, @Nullable Object value) {
4211    if (key == null || value == null) {
4212      return false;
4213    }
4214    int hash = hash(key);
4215    return segmentFor(hash).remove(key, hash, value);
4216  }
4217
4218  @Override
4219  public boolean replace(K key, @Nullable V oldValue, V newValue) {
4220    checkNotNull(key);
4221    checkNotNull(newValue);
4222    if (oldValue == null) {
4223      return false;
4224    }
4225    int hash = hash(key);
4226    return segmentFor(hash).replace(key, hash, oldValue, newValue);
4227  }
4228
4229  @Override
4230  public V replace(K key, V value) {
4231    checkNotNull(key);
4232    checkNotNull(value);
4233    int hash = hash(key);
4234    return segmentFor(hash).replace(key, hash, value);
4235  }
4236
4237  @Override
4238  public void clear() {
4239    for (Segment<K, V> segment : segments) {
4240      segment.clear();
4241    }
4242  }
4243
4244  void invalidateAll(Iterable<?> keys) {
4245    // TODO(fry): batch by segment
4246    for (Object key : keys) {
4247      remove(key);
4248    }
4249  }
4250
4251  Set<K> keySet;
4252
4253  @Override
4254  public Set<K> keySet() {
4255    // does not impact recency ordering
4256    Set<K> ks = keySet;
4257    return (ks != null) ? ks : (keySet = new KeySet());
4258  }
4259
4260  Collection<V> values;
4261
4262  @Override
4263  public Collection<V> values() {
4264    // does not impact recency ordering
4265    Collection<V> vs = values;
4266    return (vs != null) ? vs : (values = new Values());
4267  }
4268
4269  Set<Entry<K, V>> entrySet;
4270
4271  @Override
4272  public Set<Entry<K, V>> entrySet() {
4273    // does not impact recency ordering
4274    Set<Entry<K, V>> es = entrySet;
4275    return (es != null) ? es : (entrySet = new EntrySet());
4276  }
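
  /*
   * Note (illustrative): these collections, like the asMap() view on the public Cache interface,
   * are live views of the cache. Their iterators are weakly consistent: they skip expired or
   * collected entries and never throw ConcurrentModificationException.
   *
   *   for (Map.Entry<String, String> entry : cache.asMap().entrySet()) {
   *     handle(entry.getKey(), entry.getValue());   // handle is a hypothetical consumer
   *   }
   */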
4277
4278  // Iterator Support
4279
4280  abstract class HashIterator {
4281
4282    int nextSegmentIndex;
4283    int nextTableIndex;
4284    Segment<K, V> currentSegment;
4285    AtomicReferenceArray<ReferenceEntry<K, V>> currentTable;
4286    ReferenceEntry<K, V> nextEntry;
4287    WriteThroughEntry nextExternal;
4288    WriteThroughEntry lastReturned;
4289
4290    HashIterator() {
4291      nextSegmentIndex = segments.length - 1;
4292      nextTableIndex = -1;
4293      advance();
4294    }
4295
4296    final void advance() {
4297      nextExternal = null;
4298
4299      if (nextInChain()) {
4300        return;
4301      }
4302
4303      if (nextInTable()) {
4304        return;
4305      }
4306
4307      while (nextSegmentIndex >= 0) {
4308        currentSegment = segments[nextSegmentIndex--];
4309        if (currentSegment.count != 0) {
4310          currentTable = currentSegment.table;
4311          nextTableIndex = currentTable.length() - 1;
4312          if (nextInTable()) {
4313            return;
4314          }
4315        }
4316      }
4317    }
4318
4319    /**
4320     * Finds the next entry in the current chain. Returns true if an entry was found.
4321     */
4322    boolean nextInChain() {
4323      if (nextEntry != null) {
4324        for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) {
4325          if (advanceTo(nextEntry)) {
4326            return true;
4327          }
4328        }
4329      }
4330      return false;
4331    }
4332
4333    /**
4334     * Finds the next entry in the current table. Returns true if an entry was found.
4335     */
4336    boolean nextInTable() {
4337      while (nextTableIndex >= 0) {
4338        if ((nextEntry = currentTable.get(nextTableIndex--)) != null) {
4339          if (advanceTo(nextEntry) || nextInChain()) {
4340            return true;
4341          }
4342        }
4343      }
4344      return false;
4345    }
4346
4347    /**
4348     * Advances to the given entry. Returns true if the entry was valid, false if it should be
4349     * skipped.
4350     */
4351    boolean advanceTo(ReferenceEntry<K, V> entry) {
4352      try {
4353        long now = ticker.read();
4354        K key = entry.getKey();
4355        V value = getLiveValue(entry, now);
4356        if (value != null) {
4357          nextExternal = new WriteThroughEntry(key, value);
4358          return true;
4359        } else {
4360          // Skip stale entry.
4361          return false;
4362        }
4363      } finally {
4364        currentSegment.postReadCleanup();
4365      }
4366    }
4367
4368    public boolean hasNext() {
4369      return nextExternal != null;
4370    }
4371
4372    WriteThroughEntry nextEntry() {
4373      if (nextExternal == null) {
4374        throw new NoSuchElementException();
4375      }
4376      lastReturned = nextExternal;
4377      advance();
4378      return lastReturned;
4379    }
4380
4381    public void remove() {
4382      checkState(lastReturned != null);
4383      LocalCache.this.remove(lastReturned.getKey());
4384      lastReturned = null;
4385    }
4386  }
4387
4388  final class KeyIterator extends HashIterator implements Iterator<K> {
4389
4390    @Override
4391    public K next() {
4392      return nextEntry().getKey();
4393    }
4394  }
4395
4396  final class ValueIterator extends HashIterator implements Iterator<V> {
4397
4398    @Override
4399    public V next() {
4400      return nextEntry().getValue();
4401    }
4402  }
4403
4404  /**
4405   * Custom Entry class used by EntryIterator.next(). Note that setValue is not supported, so
4406   * changes cannot be written through to the underlying map.
4407   */
4408  final class WriteThroughEntry implements Entry<K, V> {
4409    final K key; // non-null
4410    V value; // non-null
4411
4412    WriteThroughEntry(K key, V value) {
4413      this.key = key;
4414      this.value = value;
4415    }
4416
4417    @Override
4418    public K getKey() {
4419      return key;
4420    }
4421
4422    @Override
4423    public V getValue() {
4424      return value;
4425    }
4426
4427    @Override
4428    public boolean equals(@Nullable Object object) {
4429      // Cannot use key and value equivalence
4430      if (object instanceof Entry) {
4431        Entry<?, ?> that = (Entry<?, ?>) object;
4432        return key.equals(that.getKey()) && value.equals(that.getValue());
4433      }
4434      return false;
4435    }
4436
4437    @Override
4438    public int hashCode() {
4439      // Cannot use key and value equivalence
4440      return key.hashCode() ^ value.hashCode();
4441    }
4442
4443    @Override
4444    public V setValue(V newValue) {
4445      throw new UnsupportedOperationException();
4446    }
4447
4448    /**
4449     * Returns a string representation of the form <code>{key}={value}</code>.
4450     */
4451    @Override public String toString() {
4452      return getKey() + "=" + getValue();
4453    }
4454  }
4455
4456  final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
4457
4458    @Override
4459    public Entry<K, V> next() {
4460      return nextEntry();
4461    }
4462  }
4463
4464  final class KeySet extends AbstractSet<K> {
4465
4466    @Override
4467    public Iterator<K> iterator() {
4468      return new KeyIterator();
4469    }
4470
4471    @Override
4472    public int size() {
4473      return LocalCache.this.size();
4474    }
4475
4476    @Override
4477    public boolean isEmpty() {
4478      return LocalCache.this.isEmpty();
4479    }
4480
4481    @Override
4482    public boolean contains(Object o) {
4483      return LocalCache.this.containsKey(o);
4484    }
4485
4486    @Override
4487    public boolean remove(Object o) {
4488      return LocalCache.this.remove(o) != null;
4489    }
4490
4491    @Override
4492    public void clear() {
4493      LocalCache.this.clear();
4494    }
4495  }
4496
4497  final class Values extends AbstractCollection<V> {
4498
4499    @Override
4500    public Iterator<V> iterator() {
4501      return new ValueIterator();
4502    }
4503
4504    @Override
4505    public int size() {
4506      return LocalCache.this.size();
4507    }
4508
4509    @Override
4510    public boolean isEmpty() {
4511      return LocalCache.this.isEmpty();
4512    }
4513
4514    @Override
4515    public boolean contains(Object o) {
4516      return LocalCache.this.containsValue(o);
4517    }
4518
4519    @Override
4520    public void clear() {
4521      LocalCache.this.clear();
4522    }
4523  }
4524
4525  final class EntrySet extends AbstractSet<Entry<K, V>> {
4526
4527    @Override
4528    public Iterator<Entry<K, V>> iterator() {
4529      return new EntryIterator();
4530    }
4531
4532    @Override
4533    public boolean contains(Object o) {
4534      if (!(o instanceof Entry)) {
4535        return false;
4536      }
4537      Entry<?, ?> e = (Entry<?, ?>) o;
4538      Object key = e.getKey();
4539      if (key == null) {
4540        return false;
4541      }
4542      V v = LocalCache.this.get(key);
4543
4544      return v != null && valueEquivalence.equivalent(e.getValue(), v);
4545    }
4546
4547    @Override
4548    public boolean remove(Object o) {
4549      if (!(o instanceof Entry)) {
4550        return false;
4551      }
4552      Entry<?, ?> e = (Entry<?, ?>) o;
4553      Object key = e.getKey();
4554      return key != null && LocalCache.this.remove(key, e.getValue());
4555    }
4556
4557    @Override
4558    public int size() {
4559      return LocalCache.this.size();
4560    }
4561
4562    @Override
4563    public boolean isEmpty() {
4564      return LocalCache.this.isEmpty();
4565    }
4566
4567    @Override
4568    public void clear() {
4569      LocalCache.this.clear();
4570    }
4571  }
4572
4573  // Serialization Support
4574
4575  /**
4576   * Serializes the configuration of a LocalCache, reconstituting it as a Cache using
4577   * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4578   * of LocalManualCache.
4579   *
4580   * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4581   * proxy must be able to behave as the cache itself.
4582   */
4583  static class ManualSerializationProxy<K, V>
4584      extends ForwardingCache<K, V> implements Serializable {
4585    private static final long serialVersionUID = 1;
4586
4587    final Strength keyStrength;
4588    final Strength valueStrength;
4589    final Equivalence<Object> keyEquivalence;
4590    final Equivalence<Object> valueEquivalence;
4591    final long expireAfterWriteNanos;
4592    final long expireAfterAccessNanos;
4593    final long maxWeight;
4594    final Weigher<K, V> weigher;
4595    final int concurrencyLevel;
4596    final RemovalListener<? super K, ? super V> removalListener;
4597    final Ticker ticker;
4598    final CacheLoader<? super K, V> loader;
4599
4600    transient Cache<K, V> delegate;
4601
4602    ManualSerializationProxy(LocalCache<K, V> cache) {
4603      this(
4604          cache.keyStrength,
4605          cache.valueStrength,
4606          cache.keyEquivalence,
4607          cache.valueEquivalence,
4608          cache.expireAfterWriteNanos,
4609          cache.expireAfterAccessNanos,
4610          cache.maxWeight,
4611          cache.weigher,
4612          cache.concurrencyLevel,
4613          cache.removalListener,
4614          cache.ticker,
4615          cache.defaultLoader);
4616    }
4617
4618    private ManualSerializationProxy(
4619        Strength keyStrength, Strength valueStrength,
4620        Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence,
4621        long expireAfterWriteNanos, long expireAfterAccessNanos, long maxWeight,
4622        Weigher<K, V> weigher, int concurrencyLevel,
4623        RemovalListener<? super K, ? super V> removalListener,
4624        Ticker ticker, CacheLoader<? super K, V> loader) {
4625      this.keyStrength = keyStrength;
4626      this.valueStrength = valueStrength;
4627      this.keyEquivalence = keyEquivalence;
4628      this.valueEquivalence = valueEquivalence;
4629      this.expireAfterWriteNanos = expireAfterWriteNanos;
4630      this.expireAfterAccessNanos = expireAfterAccessNanos;
4631      this.maxWeight = maxWeight;
4632      this.weigher = weigher;
4633      this.concurrencyLevel = concurrencyLevel;
4634      this.removalListener = removalListener;
4635      this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER)
4636          ? null : ticker;
4637      this.loader = loader;
4638    }
4639
4640    CacheBuilder<Object, Object> recreateCacheBuilder() {
4641      CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder()
4642          .setKeyStrength(keyStrength)
4643          .setValueStrength(valueStrength)
4644          .keyEquivalence(keyEquivalence)
4645          .valueEquivalence(valueEquivalence)
4646          .concurrencyLevel(concurrencyLevel);
4647      builder.strictParsing = false;
4648      builder.removalListener(removalListener);
4649      if (expireAfterWriteNanos > 0) {
4650        builder.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS);
4651      }
4652      if (expireAfterAccessNanos > 0) {
4653        builder.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS);
4654      }
4655      if (weigher != OneWeigher.INSTANCE) {
4656        builder.weigher(weigher);
4657        if (maxWeight != UNSET_INT) {
4658          builder.maximumWeight(maxWeight);
4659        }
4660      } else {
4661        if (maxWeight != UNSET_INT) {
4662          builder.maximumSize(maxWeight);
4663        }
4664      }
4665      if (ticker != null) {
4666        builder.ticker(ticker);
4667      }
4668      return builder;
4669    }
4670
4671    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4672      in.defaultReadObject();
4673      CacheBuilder<Object, Object> builder = recreateCacheBuilder();
4674      this.delegate = builder.build();
4675    }
4676
4677    private Object readResolve() {
4678      return delegate;
4679    }
4680
4681    @Override
4682    protected Cache<K, V> delegate() {
4683      return delegate;
4684    }
4685  }
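
  /*
   * Note (illustrative): because serialization is routed through this proxy, only the cache's
   * configuration survives a round trip; the contents do not, and a deserialized cache starts
   * empty. serializeAndDeserialize below is a hypothetical helper.
   *
   *   Cache<String, String> original = CacheBuilder.newBuilder().maximumSize(100).build();
   *   original.put("k", "v");
   *   Cache<String, String> copy = serializeAndDeserialize(original);
   *   // copy.getIfPresent("k") is null, but the maximumSize(100) bound is preserved
   */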
4686
4687  /**
4688   * Serializes the configuration of a LocalCache, reconstituting it as a LoadingCache using
4689   * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4690   * of LocalLoadingCache.
4691   *
4692   * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4693   * proxy must be able to behave as the cache itself.
4694   */
4695  static final class LoadingSerializationProxy<K, V>
4696      extends ManualSerializationProxy<K, V> implements LoadingCache<K, V>, Serializable {
4697    private static final long serialVersionUID = 1;
4698
4699    transient LoadingCache<K, V> autoDelegate;
4700
4701    LoadingSerializationProxy(LocalCache<K, V> cache) {
4702      super(cache);
4703    }
4704
4705    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4706      in.defaultReadObject();
4707      CacheBuilder<Object, Object> builder = recreateCacheBuilder();
4708      this.autoDelegate = builder.build(loader);
4709    }
4710
4711    @Override
4712    public V get(K key) throws ExecutionException {
4713      return autoDelegate.get(key);
4714    }
4715
4716    @Override
4717    public V getUnchecked(K key) {
4718      return autoDelegate.getUnchecked(key);
4719    }
4720
4721    @Override
4722    public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4723      return autoDelegate.getAll(keys);
4724    }
4725
4726    @Override
4727    public final V apply(K key) {
4728      return autoDelegate.apply(key);
4729    }
4730
4731    @Override
4732    public void refresh(K key) {
4733      autoDelegate.refresh(key);
4734    }
4735
4736    private Object readResolve() {
4737      return autoDelegate;
4738    }
4739  }
4740
4741  static class LocalManualCache<K, V> implements Cache<K, V>, Serializable {
4742    final LocalCache<K, V> localCache;
4743
4744    LocalManualCache(CacheBuilder<? super K, ? super V> builder) {
4745      this(builder, null);
4746    }
4747
4748    protected LocalManualCache(CacheBuilder<? super K, ? super V> builder,
4749        CacheLoader<? super K, V> loader) {
4750      this.localCache = new LocalCache<K, V>(builder, loader);
4751    }
4752
4753    // Cache methods
4754
4755    @Override
4756    @Nullable
4757    public V getIfPresent(K key) {
4758      return localCache.getIfPresent(key);
4759    }
4760
4761    @Override
4762    public V get(K key, final Callable<? extends V> valueLoader) throws ExecutionException {
4763      checkNotNull(valueLoader);
4764      return localCache.get(key, new CacheLoader<Object, V>() {
4765        @Override
4766        public V load(Object key) throws Exception {
4767          return valueLoader.call();
4768        }
4769      });
4770    }
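
    /*
     * Usage sketch (illustrative): this is the "if cached, return; otherwise compute, cache and
     * return" entry point. The Callable is invoked only on a miss, and concurrent callers for the
     * same key wait for a single computation. createWidget and widgetKey are placeholders, and
     * ExecutionException must be handled by the caller.
     *
     *   Widget widget = cache.get(widgetKey, new Callable<Widget>() {
     *     @Override public Widget call() throws Exception {
     *       return createWidget(widgetKey);
     *     }
     *   });
     */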
4771
4772    @Override
4773    public ImmutableMap<K, V> getAllPresent(Iterable<? extends K> keys) {
4774      return localCache.getAllPresent(keys);
4775    }
4776
4777    @Override
4778    public void put(K key, V value) {
4779      localCache.put(key, value);
4780    }
4781
4782    @Override
4783    public void invalidate(Object key) {
4784      checkNotNull(key);
4785      localCache.remove(key);
4786    }
4787
4788    @Override
4789    public void invalidateAll(Iterable<?> keys) {
4790      localCache.invalidateAll(keys);
4791    }
4792
4793    @Override
4794    public void invalidateAll() {
4795      localCache.clear();
4796    }
4797
4798    @Override
4799    public long size() {
4800      return localCache.longSize();
4801    }
4802
4803    @Override
4804    public ConcurrentMap<K, V> asMap() {
4805      return localCache;
4806    }
4807
4808    @Override
4809    public CacheStats stats() {
4810      SimpleStatsCounter aggregator = new SimpleStatsCounter();
4811      aggregator.incrementBy(localCache.globalStatsCounter);
4812      for (Segment<K, V> segment : localCache.segments) {
4813        aggregator.incrementBy(segment.statsCounter);
4814      }
4815      return aggregator.snapshot();
4816    }
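
    /*
     * Usage sketch (illustrative): the snapshot aggregated above can be inspected for hit and load
     * behaviour; logger is an assumed application logger.
     *
     *   CacheStats stats = cache.stats();
     *   logger.info("hit rate: " + stats.hitRate()
     *       + ", evictions: " + stats.evictionCount()
     *       + ", avg load penalty (ns): " + stats.averageLoadPenalty());
     */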
4817
4818    @Override
4819    public void cleanUp() {
4820      localCache.cleanUp();
4821    }
4822
4823    /*
4824     * These methods have been moved to LoadingCache, but they temporarily
4825     * remain in Cache in Guava.
4826     */
4827
4828    public V get(K key) throws ExecutionException {
4829      return localCache.getOrLoad(key);
4830    }
4831
4832    public V getUnchecked(K key) {
4833      try {
4834        return get(key);
4835      } catch (ExecutionException e) {
4836        throw new UncheckedExecutionException(e.getCause());
4837      }
4838    }
4839
4840    public final V apply(K key) {
4841      return getUnchecked(key);
4842    }
4843
4844    // Serialization Support
4845
4846    private static final long serialVersionUID = 1;
4847
4848    Object writeReplace() {
4849      return new ManualSerializationProxy<K, V>(localCache);
4850    }
4851  }
4852
4853  static class LocalLoadingCache<K, V>
4854      extends LocalManualCache<K, V> implements LoadingCache<K, V> {
4855
4856    LocalLoadingCache(CacheBuilder<? super K, ? super V> builder,
4857        CacheLoader<? super K, V> loader) {
4858      super(builder, checkNotNull(loader));
4859    }
4860
4861    // Cache methods
4862
4863    @Override
4864    public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4865      return localCache.getAll(keys);
4866    }
4867
4868    @Override
4869    public void refresh(K key) {
4870      localCache.refresh(key);
4871    }
4872
4873    // Serialization Support
4874
4875    private static final long serialVersionUID = 1;
4876
4877    Object writeReplace() {
4878      return new LoadingSerializationProxy<K, V>(localCache);
4879    }
4880  }
4881}
4882