1/*
2 * Copyright (C) 2009 The Guava Authors
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package com.google.common.cache;
18
19import static com.google.common.base.Preconditions.checkNotNull;
20import static com.google.common.base.Preconditions.checkState;
21import static com.google.common.cache.CacheBuilder.NULL_TICKER;
22import static com.google.common.cache.CacheBuilder.UNSET_INT;
23import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
24import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
25import static java.util.concurrent.TimeUnit.NANOSECONDS;
26
27import com.google.common.annotations.GwtCompatible;
28import com.google.common.annotations.GwtIncompatible;
29import com.google.common.annotations.VisibleForTesting;
30import com.google.common.base.Equivalence;
31import com.google.common.base.Function;
32import com.google.common.base.Stopwatch;
33import com.google.common.base.Ticker;
34import com.google.common.cache.AbstractCache.SimpleStatsCounter;
35import com.google.common.cache.AbstractCache.StatsCounter;
36import com.google.common.cache.CacheBuilder.NullListener;
37import com.google.common.cache.CacheBuilder.OneWeigher;
38import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
39import com.google.common.cache.CacheLoader.UnsupportedLoadingOperationException;
40import com.google.common.collect.AbstractSequentialIterator;
41import com.google.common.collect.ImmutableMap;
42import com.google.common.collect.ImmutableSet;
43import com.google.common.collect.Maps;
44import com.google.common.collect.Sets;
45import com.google.common.primitives.Ints;
46import com.google.common.util.concurrent.ExecutionError;
47import com.google.common.util.concurrent.Futures;
48import com.google.common.util.concurrent.ListenableFuture;
49import com.google.common.util.concurrent.SettableFuture;
50import com.google.common.util.concurrent.UncheckedExecutionException;
51import com.google.common.util.concurrent.Uninterruptibles;
52
53import java.io.IOException;
54import java.io.ObjectInputStream;
55import java.io.Serializable;
56import java.lang.ref.Reference;
57import java.lang.ref.ReferenceQueue;
58import java.lang.ref.SoftReference;
59import java.lang.ref.WeakReference;
60import java.util.AbstractCollection;
61import java.util.AbstractMap;
62import java.util.AbstractQueue;
63import java.util.AbstractSet;
64import java.util.Collection;
65import java.util.Iterator;
66import java.util.Map;
67import java.util.Map.Entry;
68import java.util.NoSuchElementException;
69import java.util.Queue;
70import java.util.Set;
71import java.util.concurrent.Callable;
72import java.util.concurrent.ConcurrentLinkedQueue;
73import java.util.concurrent.ConcurrentMap;
74import java.util.concurrent.ExecutionException;
75import java.util.concurrent.TimeUnit;
76import java.util.concurrent.atomic.AtomicInteger;
77import java.util.concurrent.atomic.AtomicReferenceArray;
78import java.util.concurrent.locks.ReentrantLock;
79import java.util.logging.Level;
80import java.util.logging.Logger;
81
82import javax.annotation.Nullable;
83import javax.annotation.concurrent.GuardedBy;
84
85/**
86 * The concurrent hash map implementation built by {@link CacheBuilder}.
87 *
88 * <p>This implementation is heavily derived from revision 1.96 of <a
89 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
90 *
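 * <p>A minimal usage sketch for orientation: this class is obtained through {@link CacheBuilder},
 * never instantiated directly. The {@code Key} and {@code Graph} types and the
 * {@code createExpensiveGraph} method below are illustrative placeholders, not part of this class.
 *
 * <pre>{@code
 * LoadingCache<Key, Graph> graphs = CacheBuilder.newBuilder()
 *     .maximumSize(10000)
 *     .expireAfterAccess(10, TimeUnit.MINUTES)
 *     .build(
 *         new CacheLoader<Key, Graph>() {
 *           public Graph load(Key key) {
 *             return createExpensiveGraph(key);
 *           }
 *         });
 * }</pre>
 *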
91 * @author Charles Fry
92 * @author Bob Lee ({@code com.google.common.collect.MapMaker})
93 * @author Doug Lea ({@code ConcurrentHashMap})
94 */
95@GwtCompatible(emulated = true)
96class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
97
98  /*
99   * The basic strategy is to subdivide the table among Segments, each of which itself is a
100   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
101   * across different segments.
102   *
103   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
104   * page-replacement algorithm to determine which entries to evict when the capacity has been
105   * exceeded.
106   *
107   * The page replacement algorithm's data structures are kept eventually consistent with the map. The
108   * ordering of writes to a segment is sequentially consistent. An update to the map and recording
109   * of reads may not be immediately reflected in the algorithm's data structures. These structures
110   * are guarded by a lock and operations are applied in batches to avoid lock contention. The
111   * penalty of applying the batches is spread across threads so that the amortized cost is slightly
112   * higher than performing just the operation without enforcing the capacity constraint.
113   *
114   * This implementation uses a per-segment queue to record a memento of the additions, removals,
115   * and accesses that were performed on the map. The queue is drained on writes and when it exceeds
116   * its capacity threshold.
117   *
118   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
119   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
120   * operates per-segment rather than globally for increased implementation simplicity. We expect
121   * the cache hit rate to be similar to that of a global LRU algorithm.
122   */
123
124  // Constants
125
126  /**
127   * The maximum capacity, used if a higher value is implicitly specified by either of the
128   * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
129   * indexable using ints.
130   */
131  static final int MAXIMUM_CAPACITY = 1 << 30;
132
133  /** The maximum number of segments to allow; used to bound constructor arguments. */
134  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
135
136  /** Number of (unsynchronized) retries in the containsValue method. */
137  static final int CONTAINS_VALUE_RETRIES = 3;
138
139  /**
140   * Number of cache access operations that can be buffered per segment before the cache's recency
141   * ordering information is updated. This is used to avoid lock contention by recording a memento
142   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
143   *
144   * <p>This must be a (2^n)-1 as it is used as a mask.
145   */
146  static final int DRAIN_THRESHOLD = 0x3F;
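
  /*
   * A minimal sketch of how a mask like this is applied on the read path (illustrative only; the
   * real bookkeeping lives in Segment, and the readCount/cleanUp names below are assumptions for
   * the sketch): every 64th unlocked read on a segment triggers a batched drain of the buffered
   * recency information.
   *
   *   if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
   *     cleanUp(); // applies the buffered reads to the access queue under the segment lock
   *   }
   */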
147
148  /**
149   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
150   * the cleanup queue and both reference queues.
151   */
152  // TODO(fry): empirically optimize this
153  static final int DRAIN_MAX = 16;
154
155  // Fields
156
157  static final Logger logger = Logger.getLogger(LocalCache.class.getName());
158
159  /**
160   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
161   * the segment.
162   */
163  final int segmentMask;
164
165  /**
166   * Shift value for indexing within segments. Helps prevent entries that end up in the same segment
167   * from also ending up in the same bucket.
168   */
169  final int segmentShift;
170
171  /** The segments, each of which is a specialized hash table. */
172  final Segment<K, V>[] segments;
173
174  /** The concurrency level. */
175  final int concurrencyLevel;
176
177  /** Strategy for comparing keys. */
178  final Equivalence<Object> keyEquivalence;
179
180  /** Strategy for comparing values. */
181  final Equivalence<Object> valueEquivalence;
182
183  /** Strategy for referencing keys. */
184  final Strength keyStrength;
185
186  /** Strategy for referencing values. */
187  final Strength valueStrength;
188
189  /** The maximum weight of this map. UNSET_INT if there is no maximum. */
190  final long maxWeight;
191
192  /** Weigher to weigh cache entries. */
193  final Weigher<K, V> weigher;
194
195  /** How long after the last access to an entry the map will retain that entry. */
196  final long expireAfterAccessNanos;
197
198  /** How long after the last write to an entry the map will retain that entry. */
199  final long expireAfterWriteNanos;
200
201  /** How long after the last write an entry becomes a candidate for refresh. */
202  final long refreshNanos;
203
204  /** Entries waiting to be consumed by the removal listener. */
205  // TODO(fry): define a new type which creates event objects and automates the clear logic
206  final Queue<RemovalNotification<K, V>> removalNotificationQueue;
207
208  /**
209   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
210   * soft/weak entries.
211   */
212  final RemovalListener<K, V> removalListener;
213
214  /** Measures time in a testable way. */
215  final Ticker ticker;
216
217  /** Factory used to create new entries. */
218  final EntryFactory entryFactory;
219
220  /**
221   * Accumulates global cache statistics. Note that there are also per-segment stats counters
222   * which must be aggregated to obtain a global stats view.
223   */
224  final StatsCounter globalStatsCounter;
225
226  /**
227   * The default cache loader to use on loading operations.
228   */
229  @Nullable
230  final CacheLoader<? super K, V> defaultLoader;
231
232  /**
233   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
234   */
235  LocalCache(
236      CacheBuilder<? super K, ? super V> builder, @Nullable CacheLoader<? super K, V> loader) {
237    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);
238
239    keyStrength = builder.getKeyStrength();
240    valueStrength = builder.getValueStrength();
241
242    keyEquivalence = builder.getKeyEquivalence();
243    valueEquivalence = builder.getValueEquivalence();
244
245    maxWeight = builder.getMaximumWeight();
246    weigher = builder.getWeigher();
247    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
248    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();
249    refreshNanos = builder.getRefreshNanos();
250
251    removalListener = builder.getRemovalListener();
252    removalNotificationQueue = (removalListener == NullListener.INSTANCE)
253        ? LocalCache.<RemovalNotification<K, V>>discardingQueue()
254        : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();
255
256    ticker = builder.getTicker(recordsTime());
257    entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
258    globalStatsCounter = builder.getStatsCounterSupplier().get();
259    defaultLoader = loader;
260
261    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
262    if (evictsBySize() && !customWeigher()) {
263      initialCapacity = Math.min(initialCapacity, (int) maxWeight);
264    }
265
266    // Find the lowest power-of-two segmentCount that exceeds concurrencyLevel, unless
267    // maximumSize/Weight is specified in which case ensure that each segment gets at least 10
268    // entries. The special casing for size-based eviction is only necessary because that eviction
269    // happens per segment instead of globally, so too many segments compared to the maximum size
270    // will result in random eviction behavior.
271    int segmentShift = 0;
272    int segmentCount = 1;
273    while (segmentCount < concurrencyLevel
274           && (!evictsBySize() || segmentCount * 20 <= maxWeight)) {
275      ++segmentShift;
276      segmentCount <<= 1;
277    }
278    this.segmentShift = 32 - segmentShift;
279    segmentMask = segmentCount - 1;
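    // Worked example (assuming CacheBuilder's default concurrencyLevel of 4 and no maximum size):
    // the loop above yields segmentCount = 4 with a local segmentShift of 2, so this.segmentShift
    // becomes 30 and segmentMask becomes 3; segmentFor(hash) then picks a segment using the top
    // two bits of the rehashed key hash.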
280
281    this.segments = newSegmentArray(segmentCount);
282
283    int segmentCapacity = initialCapacity / segmentCount;
284    if (segmentCapacity * segmentCount < initialCapacity) {
285      ++segmentCapacity;
286    }
287
288    int segmentSize = 1;
289    while (segmentSize < segmentCapacity) {
290      segmentSize <<= 1;
291    }
292
293    if (evictsBySize()) {
294      // Ensure sum of segment max weights = overall max weight
295      long maxSegmentWeight = maxWeight / segmentCount + 1;
296      long remainder = maxWeight % segmentCount;
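      // Worked example (illustrative): maxWeight = 10 and segmentCount = 4 give maxSegmentWeight = 3
      // and remainder = 2, so the per-segment weights come out as 3, 3, 2, 2; the first 'remainder'
      // segments carry one extra unit and the weights sum back to 10.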
297      for (int i = 0; i < this.segments.length; ++i) {
298        if (i == remainder) {
299          maxSegmentWeight--;
300        }
301        this.segments[i] =
302            createSegment(segmentSize, maxSegmentWeight, builder.getStatsCounterSupplier().get());
303      }
304    } else {
305      for (int i = 0; i < this.segments.length; ++i) {
306        this.segments[i] =
307            createSegment(segmentSize, UNSET_INT, builder.getStatsCounterSupplier().get());
308      }
309    }
310  }
311
312  boolean evictsBySize() {
313    return maxWeight >= 0;
314  }
315
316  boolean customWeigher() {
317    return weigher != OneWeigher.INSTANCE;
318  }
319
320  boolean expires() {
321    return expiresAfterWrite() || expiresAfterAccess();
322  }
323
324  boolean expiresAfterWrite() {
325    return expireAfterWriteNanos > 0;
326  }
327
328  boolean expiresAfterAccess() {
329    return expireAfterAccessNanos > 0;
330  }
331
332  boolean refreshes() {
333    return refreshNanos > 0;
334  }
335
336  boolean usesAccessQueue() {
337    return expiresAfterAccess() || evictsBySize();
338  }
339
340  boolean usesWriteQueue() {
341    return expiresAfterWrite();
342  }
343
344  boolean recordsWrite() {
345    return expiresAfterWrite() || refreshes();
346  }
347
348  boolean recordsAccess() {
349    return expiresAfterAccess();
350  }
351
352  boolean recordsTime() {
353    return recordsWrite() || recordsAccess();
354  }
355
356  boolean usesWriteEntries() {
357    return usesWriteQueue() || recordsWrite();
358  }
359
360  boolean usesAccessEntries() {
361    return usesAccessQueue() || recordsAccess();
362  }
363
364  boolean usesKeyReferences() {
365    return keyStrength != Strength.STRONG;
366  }
367
368  boolean usesValueReferences() {
369    return valueStrength != Strength.STRONG;
370  }
371
372  enum Strength {
373    /*
374     * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
375     * value. This could save ~8 bytes per entry.
376     */
377
378    STRONG {
379      @Override
380      <K, V> ValueReference<K, V> referenceValue(
381          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
382        return (weight == 1)
383            ? new StrongValueReference<K, V>(value)
384            : new WeightedStrongValueReference<K, V>(value, weight);
385      }
386
387      @Override
388      Equivalence<Object> defaultEquivalence() {
389        return Equivalence.equals();
390      }
391    },
392
393    SOFT {
394      @Override
395      <K, V> ValueReference<K, V> referenceValue(
396          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
397        return (weight == 1)
398            ? new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry)
399            : new WeightedSoftValueReference<K, V>(
400                segment.valueReferenceQueue, value, entry, weight);
401      }
402
403      @Override
404      Equivalence<Object> defaultEquivalence() {
405        return Equivalence.identity();
406      }
407    },
408
409    WEAK {
410      @Override
411      <K, V> ValueReference<K, V> referenceValue(
412          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
413        return (weight == 1)
414            ? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry)
415            : new WeightedWeakValueReference<K, V>(
416                segment.valueReferenceQueue, value, entry, weight);
417      }
418
419      @Override
420      Equivalence<Object> defaultEquivalence() {
421        return Equivalence.identity();
422      }
423    };
424
425    /**
426     * Creates a reference for the given value according to this value strength.
427     */
428    abstract <K, V> ValueReference<K, V> referenceValue(
429        Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight);
430
431    /**
432     * Returns the default equivalence strategy used to compare and hash keys or values referenced
433     * at this strength. This strategy will be used unless the user explicitly specifies an
434     * alternate strategy.
435     */
436    abstract Equivalence<Object> defaultEquivalence();
437  }
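
  /*
   * For illustration (behavior of CacheBuilder, not exercised in this class directly): a cache
   * built with CacheBuilder.newBuilder().weakKeys() arrives here with keyStrength == WEAK, so
   * unless the caller supplied an explicit Equivalence, keys fall back to defaultEquivalence()
   * and are compared by identity (==), whereas the default STRONG strength uses Object.equals().
   */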
438
439  /**
440   * Creates new entries.
441   */
442  enum EntryFactory {
443    STRONG {
444      @Override
445      <K, V> ReferenceEntry<K, V> newEntry(
446          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
447        return new StrongEntry<K, V>(key, hash, next);
448      }
449    },
450    STRONG_ACCESS {
451      @Override
452      <K, V> ReferenceEntry<K, V> newEntry(
453          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
454        return new StrongAccessEntry<K, V>(key, hash, next);
455      }
456
457      @Override
458      <K, V> ReferenceEntry<K, V> copyEntry(
459          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
460        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
461        copyAccessEntry(original, newEntry);
462        return newEntry;
463      }
464    },
465    STRONG_WRITE {
466      @Override
467      <K, V> ReferenceEntry<K, V> newEntry(
468          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
469        return new StrongWriteEntry<K, V>(key, hash, next);
470      }
471
472      @Override
473      <K, V> ReferenceEntry<K, V> copyEntry(
474          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
475        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
476        copyWriteEntry(original, newEntry);
477        return newEntry;
478      }
479    },
480    STRONG_ACCESS_WRITE {
481      @Override
482      <K, V> ReferenceEntry<K, V> newEntry(
483          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
484        return new StrongAccessWriteEntry<K, V>(key, hash, next);
485      }
486
487      @Override
488      <K, V> ReferenceEntry<K, V> copyEntry(
489          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
490        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
491        copyAccessEntry(original, newEntry);
492        copyWriteEntry(original, newEntry);
493        return newEntry;
494      }
495    },
496
497    WEAK {
498      @Override
499      <K, V> ReferenceEntry<K, V> newEntry(
500          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
501        return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
502      }
503    },
504    WEAK_ACCESS {
505      @Override
506      <K, V> ReferenceEntry<K, V> newEntry(
507          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
508        return new WeakAccessEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
509      }
510
511      @Override
512      <K, V> ReferenceEntry<K, V> copyEntry(
513          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
514        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
515        copyAccessEntry(original, newEntry);
516        return newEntry;
517      }
518    },
519    WEAK_WRITE {
520      @Override
521      <K, V> ReferenceEntry<K, V> newEntry(
522          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
523        return new WeakWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
524      }
525
526      @Override
527      <K, V> ReferenceEntry<K, V> copyEntry(
528          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
529        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
530        copyWriteEntry(original, newEntry);
531        return newEntry;
532      }
533    },
534    WEAK_ACCESS_WRITE {
535      @Override
536      <K, V> ReferenceEntry<K, V> newEntry(
537          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
538        return new WeakAccessWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
539      }
540
541      @Override
542      <K, V> ReferenceEntry<K, V> copyEntry(
543          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
544        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
545        copyAccessEntry(original, newEntry);
546        copyWriteEntry(original, newEntry);
547        return newEntry;
548      }
549    };
550
551    /**
552     * Masks used to compute indices in the following table.
553     */
554    static final int ACCESS_MASK = 1;
555    static final int WRITE_MASK = 2;
556    static final int WEAK_MASK = 4;
557
558    /**
559     * Look-up table for factories.
560     */
561    static final EntryFactory[] factories = {
562      STRONG, STRONG_ACCESS, STRONG_WRITE, STRONG_ACCESS_WRITE,
563      WEAK, WEAK_ACCESS, WEAK_WRITE, WEAK_ACCESS_WRITE,
564    };
565
566    static EntryFactory getFactory(Strength keyStrength, boolean usesAccessQueue,
567        boolean usesWriteQueue) {
568      int flags = ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0)
569          | (usesAccessQueue ? ACCESS_MASK : 0)
570          | (usesWriteQueue ? WRITE_MASK : 0);
571      return factories[flags];
572    }
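
    // Worked example: weak keys with access entries but no write entries gives
    // flags = WEAK_MASK | ACCESS_MASK = 5, so factories[5], i.e. WEAK_ACCESS, is returned.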
573
574    /**
575     * Creates a new entry.
576     *
577     * @param segment to create the entry for
578     * @param key of the entry
579     * @param hash of the key
580     * @param next entry in the same bucket
581     */
582    abstract <K, V> ReferenceEntry<K, V> newEntry(
583        Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next);
584
585    /**
586     * Copies an entry, assigning it a new {@code next} entry.
587     *
588     * @param original the entry to copy
589     * @param newNext entry in the same bucket
590     */
591    // Guarded By Segment.this
592    <K, V> ReferenceEntry<K, V> copyEntry(
593        Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
594      return newEntry(segment, original.getKey(), original.getHash(), newNext);
595    }
596
597    // Guarded By Segment.this
598    <K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
599      // TODO(fry): when we link values instead of entries this method can go
600      // away, as can connectAccessOrder, nullifyAccessOrder.
601      newEntry.setAccessTime(original.getAccessTime());
602
603      connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
604      connectAccessOrder(newEntry, original.getNextInAccessQueue());
605
606      nullifyAccessOrder(original);
607    }
608
609    // Guarded By Segment.this
610    <K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
611      // TODO(fry): when we link values instead of entries this method can go
612      // away, as can connectWriteOrder, nullifyWriteOrder.
613      newEntry.setWriteTime(original.getWriteTime());
614
615      connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
616      connectWriteOrder(newEntry, original.getNextInWriteQueue());
617
618      nullifyWriteOrder(original);
619    }
620  }
621
622  /**
623   * A reference to a value.
624   */
625  interface ValueReference<K, V> {
626    /**
627     * Returns the value. Does not block or throw exceptions.
628     */
629    @Nullable
630    V get();
631
632    /**
633     * Waits for a value that may still be loading. Unlike get(), this method can block (in the
634     * case of a loading value reference).
635     *
636     * @throws ExecutionException if the loading thread throws an exception
637     * @throws ExecutionError if the loading thread throws an error
638     */
639    V waitForValue() throws ExecutionException;
640
641    /**
642     * Returns the weight of this entry. This is assumed to be static between calls to setValue.
643     */
644    int getWeight();
645
646    /**
647     * Returns the entry associated with this value reference, or {@code null} if this value
648     * reference is independent of any entry.
649     */
650    @Nullable
651    ReferenceEntry<K, V> getEntry();
652
653    /**
654     * Creates a copy of this reference for the given entry.
655     *
656     * <p>{@code value} may be null only for a loading reference.
657     */
658    ValueReference<K, V> copyFor(
659        ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry);
660
661    /**
662     * Notify pending loads that a new value was set. This is only relevant to loading
663     * value references.
664     */
665    void notifyNewValue(@Nullable V newValue);
666
667    /**
668     * Returns true if a new value is currently loading, regardless of whether or not there is an
669     * existing value. It is assumed that the return value of this method is constant for any given
670     * ValueReference instance.
671     */
672    boolean isLoading();
673
674    /**
675     * Returns true if this reference contains an active value, meaning one that is still considered
676     * present in the cache. Active values consist of live values, which are returned by cache
677     * lookups, and dead values, which have been evicted but are awaiting removal. Non-active values
678     * consist strictly of loading values, though during refresh a value may be both active and
679     * loading.
680     */
681    boolean isActive();
682  }
683
684  /**
685   * Placeholder. Indicates that the value hasn't been set yet.
686   */
687  static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
688    @Override
689    public Object get() {
690      return null;
691    }
692
693    @Override
694    public int getWeight() {
695      return 0;
696    }
697
698    @Override
699    public ReferenceEntry<Object, Object> getEntry() {
700      return null;
701    }
702
703    @Override
704    public ValueReference<Object, Object> copyFor(ReferenceQueue<Object> queue,
705        @Nullable Object value, ReferenceEntry<Object, Object> entry) {
706      return this;
707    }
708
709    @Override
710    public boolean isLoading() {
711      return false;
712    }
713
714    @Override
715    public boolean isActive() {
716      return false;
717    }
718
719    @Override
720    public Object waitForValue() {
721      return null;
722    }
723
724    @Override
725    public void notifyNewValue(Object newValue) {}
726  };
727
728  /**
729   * Returns the singleton placeholder that indicates the value hasn't been set yet.
730   */
731  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
732  static <K, V> ValueReference<K, V> unset() {
733    return (ValueReference<K, V>) UNSET;
734  }
735
736  /**
737   * An entry in a reference map.
738   *
739   * Entries in the map can be in the following states:
740   *
741   * Valid:
742   * - Live: valid key/value are set
743   * - Loading: loading is pending
744   *
745   * Invalid:
746   * - Expired: time expired (key/value may still be set)
747   * - Collected: key/value was partially collected, but not yet cleaned up
748   * - Unset: marked as unset, awaiting cleanup or reuse
749   */
750  interface ReferenceEntry<K, V> {
751    /**
752     * Returns the value reference from this entry.
753     */
754    ValueReference<K, V> getValueReference();
755
756    /**
757     * Sets the value reference for this entry.
758     */
759    void setValueReference(ValueReference<K, V> valueReference);
760
761    /**
762     * Returns the next entry in the chain.
763     */
764    @Nullable
765    ReferenceEntry<K, V> getNext();
766
767    /**
768     * Returns the entry's hash.
769     */
770    int getHash();
771
772    /**
773     * Returns the key for this entry.
774     */
775    @Nullable
776    K getKey();
777
778    /*
779     * Used by entries that use access order. Access entries are maintained in a doubly-linked list.
780     * New entries are added at the tail of the list at write time; stale entries are expired from
781     * the head of the list.
782     */
783
784    /**
785     * Returns the time that this entry was last accessed, in ns.
786     */
787    long getAccessTime();
788
789    /**
790     * Sets the entry access time in ns.
791     */
792    void setAccessTime(long time);
793
794    /**
795     * Returns the next entry in the access queue.
796     */
797    ReferenceEntry<K, V> getNextInAccessQueue();
798
799    /**
800     * Sets the next entry in the access queue.
801     */
802    void setNextInAccessQueue(ReferenceEntry<K, V> next);
803
804    /**
805     * Returns the previous entry in the access queue.
806     */
807    ReferenceEntry<K, V> getPreviousInAccessQueue();
808
809    /**
810     * Sets the previous entry in the access queue.
811     */
812    void setPreviousInAccessQueue(ReferenceEntry<K, V> previous);
813
814    /*
815     * Implemented by entries that use write order. Write entries are maintained in a
816     * doubly-linked list. New entries are added at the tail of the list at write time and stale
817     * entries are expired from the head of the list.
818     */
819
820    /**
821     * Returns the time that this entry was last written, in ns.
822     */
823    long getWriteTime();
824
825    /**
826     * Sets the entry write time in ns.
827     */
828    void setWriteTime(long time);
829
830    /**
831     * Returns the next entry in the write queue.
832     */
833    ReferenceEntry<K, V> getNextInWriteQueue();
834
835    /**
836     * Sets the next entry in the write queue.
837     */
838    void setNextInWriteQueue(ReferenceEntry<K, V> next);
839
840    /**
841     * Returns the previous entry in the write queue.
842     */
843    ReferenceEntry<K, V> getPreviousInWriteQueue();
844
845    /**
846     * Sets the previous entry in the write queue.
847     */
848    void setPreviousInWriteQueue(ReferenceEntry<K, V> previous);
849  }
850
851  private enum NullEntry implements ReferenceEntry<Object, Object> {
852    INSTANCE;
853
854    @Override
855    public ValueReference<Object, Object> getValueReference() {
856      return null;
857    }
858
859    @Override
860    public void setValueReference(ValueReference<Object, Object> valueReference) {}
861
862    @Override
863    public ReferenceEntry<Object, Object> getNext() {
864      return null;
865    }
866
867    @Override
868    public int getHash() {
869      return 0;
870    }
871
872    @Override
873    public Object getKey() {
874      return null;
875    }
876
877    @Override
878    public long getAccessTime() {
879      return 0;
880    }
881
882    @Override
883    public void setAccessTime(long time) {}
884
885    @Override
886    public ReferenceEntry<Object, Object> getNextInAccessQueue() {
887      return this;
888    }
889
890    @Override
891    public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {}
892
893    @Override
894    public ReferenceEntry<Object, Object> getPreviousInAccessQueue() {
895      return this;
896    }
897
898    @Override
899    public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {}
900
901    @Override
902    public long getWriteTime() {
903      return 0;
904    }
905
906    @Override
907    public void setWriteTime(long time) {}
908
909    @Override
910    public ReferenceEntry<Object, Object> getNextInWriteQueue() {
911      return this;
912    }
913
914    @Override
915    public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {}
916
917    @Override
918    public ReferenceEntry<Object, Object> getPreviousInWriteQueue() {
919      return this;
920    }
921
922    @Override
923    public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {}
924  }
925
926  abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
927    @Override
928    public ValueReference<K, V> getValueReference() {
929      throw new UnsupportedOperationException();
930    }
931
932    @Override
933    public void setValueReference(ValueReference<K, V> valueReference) {
934      throw new UnsupportedOperationException();
935    }
936
937    @Override
938    public ReferenceEntry<K, V> getNext() {
939      throw new UnsupportedOperationException();
940    }
941
942    @Override
943    public int getHash() {
944      throw new UnsupportedOperationException();
945    }
946
947    @Override
948    public K getKey() {
949      throw new UnsupportedOperationException();
950    }
951
952    @Override
953    public long getAccessTime() {
954      throw new UnsupportedOperationException();
955    }
956
957    @Override
958    public void setAccessTime(long time) {
959      throw new UnsupportedOperationException();
960    }
961
962    @Override
963    public ReferenceEntry<K, V> getNextInAccessQueue() {
964      throw new UnsupportedOperationException();
965    }
966
967    @Override
968    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
969      throw new UnsupportedOperationException();
970    }
971
972    @Override
973    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
974      throw new UnsupportedOperationException();
975    }
976
977    @Override
978    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
979      throw new UnsupportedOperationException();
980    }
981
982    @Override
983    public long getWriteTime() {
984      throw new UnsupportedOperationException();
985    }
986
987    @Override
988    public void setWriteTime(long time) {
989      throw new UnsupportedOperationException();
990    }
991
992    @Override
993    public ReferenceEntry<K, V> getNextInWriteQueue() {
994      throw new UnsupportedOperationException();
995    }
996
997    @Override
998    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
999      throw new UnsupportedOperationException();
1000    }
1001
1002    @Override
1003    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1004      throw new UnsupportedOperationException();
1005    }
1006
1007    @Override
1008    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1009      throw new UnsupportedOperationException();
1010    }
1011  }
1012
1013  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1014  static <K, V> ReferenceEntry<K, V> nullEntry() {
1015    return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
1016  }
1017
1018  static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() {
1019    @Override
1020    public boolean offer(Object o) {
1021      return true;
1022    }
1023
1024    @Override
1025    public Object peek() {
1026      return null;
1027    }
1028
1029    @Override
1030    public Object poll() {
1031      return null;
1032    }
1033
1034    @Override
1035    public int size() {
1036      return 0;
1037    }
1038
1039    @Override
1040    public Iterator<Object> iterator() {
1041      return ImmutableSet.of().iterator();
1042    }
1043  };
1044
1045  /**
1046   * Queue that discards all elements.
1047   */
1048  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1049  static <E> Queue<E> discardingQueue() {
1050    return (Queue) DISCARDING_QUEUE;
1051  }
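
  /*
   * For illustration: the constructor wires this in when no removal listener was installed
   * (removalListener == NullListener.INSTANCE), so removal notifications are dropped rather than
   * queued, e.g.
   *
   *   Queue<RemovalNotification<K, V>> q = LocalCache.<RemovalNotification<K, V>>discardingQueue();
   *   q.offer(notification); // returns true but discards; q.poll() always returns null
   */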
1052
1053  /*
1054   * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
1055   * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
1056   * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
1057   * strong entries store the key reference directly while soft and weak entries delegate to their
1058   * respective superclasses.
1059   */
1060
1061  /**
1062   * Used for strongly-referenced keys.
1063   */
1064  static class StrongEntry<K, V> extends AbstractReferenceEntry<K, V> {
1065    final K key;
1066
1067    StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1068      this.key = key;
1069      this.hash = hash;
1070      this.next = next;
1071    }
1072
1073    @Override
1074    public K getKey() {
1075      return this.key;
1076    }
1077
1078    // The code below is exactly the same for each entry type.
1079
1080    final int hash;
1081    final ReferenceEntry<K, V> next;
1082    volatile ValueReference<K, V> valueReference = unset();
1083
1084    @Override
1085    public ValueReference<K, V> getValueReference() {
1086      return valueReference;
1087    }
1088
1089    @Override
1090    public void setValueReference(ValueReference<K, V> valueReference) {
1091      this.valueReference = valueReference;
1092    }
1093
1094    @Override
1095    public int getHash() {
1096      return hash;
1097    }
1098
1099    @Override
1100    public ReferenceEntry<K, V> getNext() {
1101      return next;
1102    }
1103  }
1104
1105  static final class StrongAccessEntry<K, V> extends StrongEntry<K, V> {
1106    StrongAccessEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1107      super(key, hash, next);
1108    }
1109
1110    // The code below is exactly the same for each access entry type.
1111
1112    volatile long accessTime = Long.MAX_VALUE;
1113
1114    @Override
1115    public long getAccessTime() {
1116      return accessTime;
1117    }
1118
1119    @Override
1120    public void setAccessTime(long time) {
1121      this.accessTime = time;
1122    }
1123
1124    // Guarded By Segment.this
1125    ReferenceEntry<K, V> nextAccess = nullEntry();
1126
1127    @Override
1128    public ReferenceEntry<K, V> getNextInAccessQueue() {
1129      return nextAccess;
1130    }
1131
1132    @Override
1133    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1134      this.nextAccess = next;
1135    }
1136
1137    // Guarded By Segment.this
1138    ReferenceEntry<K, V> previousAccess = nullEntry();
1139
1140    @Override
1141    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1142      return previousAccess;
1143    }
1144
1145    @Override
1146    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1147      this.previousAccess = previous;
1148    }
1149  }
1150
1151  static final class StrongWriteEntry<K, V> extends StrongEntry<K, V> {
1152    StrongWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1153      super(key, hash, next);
1154    }
1155
1156    // The code below is exactly the same for each write entry type.
1157
1158    volatile long writeTime = Long.MAX_VALUE;
1159
1160    @Override
1161    public long getWriteTime() {
1162      return writeTime;
1163    }
1164
1165    @Override
1166    public void setWriteTime(long time) {
1167      this.writeTime = time;
1168    }
1169
1170    // Guarded By Segment.this
1171    ReferenceEntry<K, V> nextWrite = nullEntry();
1172
1173    @Override
1174    public ReferenceEntry<K, V> getNextInWriteQueue() {
1175      return nextWrite;
1176    }
1177
1178    @Override
1179    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1180      this.nextWrite = next;
1181    }
1182
1183    // Guarded By Segment.this
1184    ReferenceEntry<K, V> previousWrite = nullEntry();
1185
1186    @Override
1187    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1188      return previousWrite;
1189    }
1190
1191    @Override
1192    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1193      this.previousWrite = previous;
1194    }
1195  }
1196
1197  static final class StrongAccessWriteEntry<K, V> extends StrongEntry<K, V> {
1198    StrongAccessWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1199      super(key, hash, next);
1200    }
1201
1202    // The code below is exactly the same for each access entry type.
1203
1204    volatile long accessTime = Long.MAX_VALUE;
1205
1206    @Override
1207    public long getAccessTime() {
1208      return accessTime;
1209    }
1210
1211    @Override
1212    public void setAccessTime(long time) {
1213      this.accessTime = time;
1214    }
1215
1216    // Guarded By Segment.this
1217    ReferenceEntry<K, V> nextAccess = nullEntry();
1218
1219    @Override
1220    public ReferenceEntry<K, V> getNextInAccessQueue() {
1221      return nextAccess;
1222    }
1223
1224    @Override
1225    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1226      this.nextAccess = next;
1227    }
1228
1229    // Guarded By Segment.this
1230    ReferenceEntry<K, V> previousAccess = nullEntry();
1231
1232    @Override
1233    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1234      return previousAccess;
1235    }
1236
1237    @Override
1238    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1239      this.previousAccess = previous;
1240    }
1241
1242    // The code below is exactly the same for each write entry type.
1243
1244    volatile long writeTime = Long.MAX_VALUE;
1245
1246    @Override
1247    public long getWriteTime() {
1248      return writeTime;
1249    }
1250
1251    @Override
1252    public void setWriteTime(long time) {
1253      this.writeTime = time;
1254    }
1255
1256    // Guarded By Segment.this
1257    ReferenceEntry<K, V> nextWrite = nullEntry();
1258
1259    @Override
1260    public ReferenceEntry<K, V> getNextInWriteQueue() {
1261      return nextWrite;
1262    }
1263
1264    @Override
1265    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1266      this.nextWrite = next;
1267    }
1268
1269    // Guarded By Segment.this
1270    ReferenceEntry<K, V> previousWrite = nullEntry();
1271
1272    @Override
1273    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1274      return previousWrite;
1275    }
1276
1277    @Override
1278    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1279      this.previousWrite = previous;
1280    }
1281  }
1282
1283  /**
1284   * Used for weakly-referenced keys.
1285   */
1286  static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> {
1287    WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1288      super(key, queue);
1289      this.hash = hash;
1290      this.next = next;
1291    }
1292
1293    @Override
1294    public K getKey() {
1295      return get();
1296    }
1297
1298    /*
1299     * It'd be nice to get these for free from AbstractReferenceEntry, but we're already extending
1300     * WeakReference<K>.
1301     */
1302
1303    // null access
1304
1305    @Override
1306    public long getAccessTime() {
1307      throw new UnsupportedOperationException();
1308    }
1309
1310    @Override
1311    public void setAccessTime(long time) {
1312      throw new UnsupportedOperationException();
1313    }
1314
1315    @Override
1316    public ReferenceEntry<K, V> getNextInAccessQueue() {
1317      throw new UnsupportedOperationException();
1318    }
1319
1320    @Override
1321    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1322      throw new UnsupportedOperationException();
1323    }
1324
1325    @Override
1326    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1327      throw new UnsupportedOperationException();
1328    }
1329
1330    @Override
1331    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1332      throw new UnsupportedOperationException();
1333    }
1334
1335    // null write
1336
1337    @Override
1338    public long getWriteTime() {
1339      throw new UnsupportedOperationException();
1340    }
1341
1342    @Override
1343    public void setWriteTime(long time) {
1344      throw new UnsupportedOperationException();
1345    }
1346
1347    @Override
1348    public ReferenceEntry<K, V> getNextInWriteQueue() {
1349      throw new UnsupportedOperationException();
1350    }
1351
1352    @Override
1353    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1354      throw new UnsupportedOperationException();
1355    }
1356
1357    @Override
1358    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1359      throw new UnsupportedOperationException();
1360    }
1361
1362    @Override
1363    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1364      throw new UnsupportedOperationException();
1365    }
1366
1367    // The code below is exactly the same for each entry type.
1368
1369    final int hash;
1370    final ReferenceEntry<K, V> next;
1371    volatile ValueReference<K, V> valueReference = unset();
1372
1373    @Override
1374    public ValueReference<K, V> getValueReference() {
1375      return valueReference;
1376    }
1377
1378    @Override
1379    public void setValueReference(ValueReference<K, V> valueReference) {
1380      this.valueReference = valueReference;
1381    }
1382
1383    @Override
1384    public int getHash() {
1385      return hash;
1386    }
1387
1388    @Override
1389    public ReferenceEntry<K, V> getNext() {
1390      return next;
1391    }
1392  }
1393
1394  static final class WeakAccessEntry<K, V> extends WeakEntry<K, V> {
1395    WeakAccessEntry(
1396        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1397      super(queue, key, hash, next);
1398    }
1399
1400    // The code below is exactly the same for each access entry type.
1401
1402    volatile long accessTime = Long.MAX_VALUE;
1403
1404    @Override
1405    public long getAccessTime() {
1406      return accessTime;
1407    }
1408
1409    @Override
1410    public void setAccessTime(long time) {
1411      this.accessTime = time;
1412    }
1413
1414    // Guarded By Segment.this
1415    ReferenceEntry<K, V> nextAccess = nullEntry();
1416
1417    @Override
1418    public ReferenceEntry<K, V> getNextInAccessQueue() {
1419      return nextAccess;
1420    }
1421
1422    @Override
1423    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1424      this.nextAccess = next;
1425    }
1426
1427    // Guarded By Segment.this
1428    ReferenceEntry<K, V> previousAccess = nullEntry();
1429
1430    @Override
1431    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1432      return previousAccess;
1433    }
1434
1435    @Override
1436    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1437      this.previousAccess = previous;
1438    }
1439  }
1440
1441  static final class WeakWriteEntry<K, V> extends WeakEntry<K, V> {
1442    WeakWriteEntry(
1443        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1444      super(queue, key, hash, next);
1445    }
1446
1447    // The code below is exactly the same for each write entry type.
1448
1449    volatile long writeTime = Long.MAX_VALUE;
1450
1451    @Override
1452    public long getWriteTime() {
1453      return writeTime;
1454    }
1455
1456    @Override
1457    public void setWriteTime(long time) {
1458      this.writeTime = time;
1459    }
1460
1461    // Guarded By Segment.this
1462    ReferenceEntry<K, V> nextWrite = nullEntry();
1463
1464    @Override
1465    public ReferenceEntry<K, V> getNextInWriteQueue() {
1466      return nextWrite;
1467    }
1468
1469    @Override
1470    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1471      this.nextWrite = next;
1472    }
1473
1474    // Guarded By Segment.this
1475    ReferenceEntry<K, V> previousWrite = nullEntry();
1476
1477    @Override
1478    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1479      return previousWrite;
1480    }
1481
1482    @Override
1483    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1484      this.previousWrite = previous;
1485    }
1486  }
1487
1488  static final class WeakAccessWriteEntry<K, V> extends WeakEntry<K, V> {
1489    WeakAccessWriteEntry(
1490        ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1491      super(queue, key, hash, next);
1492    }
1493
1494    // The code below is exactly the same for each access entry type.
1495
1496    volatile long accessTime = Long.MAX_VALUE;
1497
1498    @Override
1499    public long getAccessTime() {
1500      return accessTime;
1501    }
1502
1503    @Override
1504    public void setAccessTime(long time) {
1505      this.accessTime = time;
1506    }
1507
1508    // Guarded By Segment.this
1509    ReferenceEntry<K, V> nextAccess = nullEntry();
1510
1511    @Override
1512    public ReferenceEntry<K, V> getNextInAccessQueue() {
1513      return nextAccess;
1514    }
1515
1516    @Override
1517    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1518      this.nextAccess = next;
1519    }
1520
1521    // Guarded By Segment.this
1522    ReferenceEntry<K, V> previousAccess = nullEntry();
1523
1524    @Override
1525    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1526      return previousAccess;
1527    }
1528
1529    @Override
1530    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1531      this.previousAccess = previous;
1532    }
1533
1534    // The code below is exactly the same for each write entry type.
1535
1536    volatile long writeTime = Long.MAX_VALUE;
1537
1538    @Override
1539    public long getWriteTime() {
1540      return writeTime;
1541    }
1542
1543    @Override
1544    public void setWriteTime(long time) {
1545      this.writeTime = time;
1546    }
1547
1548    // Guarded By Segment.this
1549    ReferenceEntry<K, V> nextWrite = nullEntry();
1550
1551    @Override
1552    public ReferenceEntry<K, V> getNextInWriteQueue() {
1553      return nextWrite;
1554    }
1555
1556    @Override
1557    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1558      this.nextWrite = next;
1559    }
1560
1561    // Guarded By Segment.this
1562    ReferenceEntry<K, V> previousWrite = nullEntry();
1563
1564    @Override
1565    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1566      return previousWrite;
1567    }
1568
1569    @Override
1570    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1571      this.previousWrite = previous;
1572    }
1573  }
1574
1575  /**
1576   * References a weak value.
1577   */
1578  static class WeakValueReference<K, V>
1579      extends WeakReference<V> implements ValueReference<K, V> {
1580    final ReferenceEntry<K, V> entry;
1581
1582    WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1583      super(referent, queue);
1584      this.entry = entry;
1585    }
1586
1587    @Override
1588    public int getWeight() {
1589      return 1;
1590    }
1591
1592    @Override
1593    public ReferenceEntry<K, V> getEntry() {
1594      return entry;
1595    }
1596
1597    @Override
1598    public void notifyNewValue(V newValue) {}
1599
1600    @Override
1601    public ValueReference<K, V> copyFor(
1602        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1603      return new WeakValueReference<K, V>(queue, value, entry);
1604    }
1605
1606    @Override
1607    public boolean isLoading() {
1608      return false;
1609    }
1610
1611    @Override
1612    public boolean isActive() {
1613      return true;
1614    }
1615
1616    @Override
1617    public V waitForValue() {
1618      return get();
1619    }
1620  }
1621
1622  /**
1623   * References a soft value.
1624   */
1625  static class SoftValueReference<K, V>
1626      extends SoftReference<V> implements ValueReference<K, V> {
1627    final ReferenceEntry<K, V> entry;
1628
1629    SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1630      super(referent, queue);
1631      this.entry = entry;
1632    }
1633
1634    @Override
1635    public int getWeight() {
1636      return 1;
1637    }
1638
1639    @Override
1640    public ReferenceEntry<K, V> getEntry() {
1641      return entry;
1642    }
1643
1644    @Override
1645    public void notifyNewValue(V newValue) {}
1646
1647    @Override
1648    public ValueReference<K, V> copyFor(
1649        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1650      return new SoftValueReference<K, V>(queue, value, entry);
1651    }
1652
1653    @Override
1654    public boolean isLoading() {
1655      return false;
1656    }
1657
1658    @Override
1659    public boolean isActive() {
1660      return true;
1661    }
1662
1663    @Override
1664    public V waitForValue() {
1665      return get();
1666    }
1667  }
1668
1669  /**
1670   * References a strong value.
1671   */
1672  static class StrongValueReference<K, V> implements ValueReference<K, V> {
1673    final V referent;
1674
1675    StrongValueReference(V referent) {
1676      this.referent = referent;
1677    }
1678
1679    @Override
1680    public V get() {
1681      return referent;
1682    }
1683
1684    @Override
1685    public int getWeight() {
1686      return 1;
1687    }
1688
1689    @Override
1690    public ReferenceEntry<K, V> getEntry() {
1691      return null;
1692    }
1693
1694    @Override
1695    public ValueReference<K, V> copyFor(
1696        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1697      return this;
1698    }
1699
1700    @Override
1701    public boolean isLoading() {
1702      return false;
1703    }
1704
1705    @Override
1706    public boolean isActive() {
1707      return true;
1708    }
1709
1710    @Override
1711    public V waitForValue() {
1712      return get();
1713    }
1714
1715    @Override
1716    public void notifyNewValue(V newValue) {}
1717  }
1718
1719  /**
1720   * References a weak value.
1721   */
1722  static final class WeightedWeakValueReference<K, V> extends WeakValueReference<K, V> {
1723    final int weight;
1724
1725    WeightedWeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1726        int weight) {
1727      super(queue, referent, entry);
1728      this.weight = weight;
1729    }
1730
1731    @Override
1732    public int getWeight() {
1733      return weight;
1734    }
1735
1736    @Override
1737    public ValueReference<K, V> copyFor(
1738        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1739      return new WeightedWeakValueReference<K, V>(queue, value, entry, weight);
1740    }
1741  }
1742
1743  /**
1744   * References a soft value.
1745   */
1746  static final class WeightedSoftValueReference<K, V> extends SoftValueReference<K, V> {
1747    final int weight;
1748
1749    WeightedSoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1750        int weight) {
1751      super(queue, referent, entry);
1752      this.weight = weight;
1753    }
1754
1755    @Override
1756    public int getWeight() {
1757      return weight;
1758    }
1759    @Override
1760    public ValueReference<K, V> copyFor(
1761        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1762      return new WeightedSoftValueReference<K, V>(queue, value, entry, weight);
1763    }
1764
1765  }
1766
1767  /**
1768   * References a strong value.
1769   */
1770  static final class WeightedStrongValueReference<K, V> extends StrongValueReference<K, V> {
1771    final int weight;
1772
1773    WeightedStrongValueReference(V referent, int weight) {
1774      super(referent);
1775      this.weight = weight;
1776    }
1777
1778    @Override
1779    public int getWeight() {
1780      return weight;
1781    }
1782  }
1783
1784  /**
1785   * Applies a supplemental hash function to a given hash code, which defends against poor quality
1786   * hash functions. This is critical when the concurrent hash map uses power-of-two length hash
1787   * tables that otherwise encounter collisions for hash codes that do not differ in lower or
1788   * upper bits.
1789   *
1790   * @param h hash code
1791   */
1792  static int rehash(int h) {
1793    // Spread bits to regularize both segment and index locations,
1794    // using variant of single-word Wang/Jenkins hash.
1795    // TODO(kevinb): use Hashing/move this to Hashing?
1796    h += (h << 15) ^ 0xffffcd7d;
1797    h ^= (h >>> 10);
1798    h += (h << 3);
1799    h ^= (h >>> 6);
1800    h += (h << 2) + (h << 14);
1801    return h ^ (h >>> 16);
1802  }
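
  /*
   * Illustrative sketch (not part of the cache): with a power-of-two table, the bucket index is
   * taken from the low-order bits of the hash, so hash codes that differ only in their upper bits
   * would collide without the supplemental hash. For example, assuming a hypothetical 16-bucket
   * table:
   *
   *   int mask = 16 - 1;                      // table.length() - 1
   *   int h1 = 0x10000000, h2 = 0x20000000;   // differ only in upper bits
   *   int raw1 = h1 & mask, raw2 = h2 & mask; // both 0 -> same bucket
   *   int spread1 = rehash(h1) & mask;        // after rehashing the indices typically differ
   *   int spread2 = rehash(h2) & mask;
   */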
1803
1804  /**
1805   * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly.
1806   */
1807  @VisibleForTesting
1808  ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1809    Segment<K, V> segment = segmentFor(hash);
1810    segment.lock();
1811    try {
1812      return segment.newEntry(key, hash, next);
1813    } finally {
1814      segment.unlock();
1815    }
1816  }
1817
1818  /**
1819   * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly.
1820   */
1821  // Guarded By Segment.this
1822  @VisibleForTesting
1823  ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
1824    int hash = original.getHash();
1825    return segmentFor(hash).copyEntry(original, newNext);
1826  }
1827
1828  /**
1829   * This method is a convenience for testing. Code should call {@link Segment#setValue} instead.
1830   */
1831  // Guarded By Segment.this
1832  @VisibleForTesting
1833  ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value, int weight) {
1834    int hash = entry.getHash();
1835    return valueStrength.referenceValue(segmentFor(hash), entry, checkNotNull(value), weight);
1836  }
1837
1838  int hash(@Nullable Object key) {
1839    int h = keyEquivalence.hash(key);
1840    return rehash(h);
1841  }
1842
1843  void reclaimValue(ValueReference<K, V> valueReference) {
1844    ReferenceEntry<K, V> entry = valueReference.getEntry();
1845    int hash = entry.getHash();
1846    segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference);
1847  }
1848
1849  void reclaimKey(ReferenceEntry<K, V> entry) {
1850    int hash = entry.getHash();
1851    segmentFor(hash).reclaimKey(entry, hash);
1852  }
1853
1854  /**
1855   * This method is a convenience for testing. Code should call {@link Segment#getLiveValue}
1856   * instead.
1857   */
1858  @VisibleForTesting
1859  boolean isLive(ReferenceEntry<K, V> entry, long now) {
1860    return segmentFor(entry.getHash()).getLiveValue(entry, now) != null;
1861  }
1862
1863  /**
1864   * Returns the segment that should be used for a key with the given hash.
1865   *
1866   * @param hash the hash code for the key
1867   * @return the segment
1868   */
1869  Segment<K, V> segmentFor(int hash) {
1870    // TODO(fry): Lazily create segments?
1871    return segments[(hash >>> segmentShift) & segmentMask];
1872  }
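
  /*
   * Illustrative sketch (not part of the cache): segment selection uses the high-order bits of
   * the (already rehashed) hash, while each segment's table is indexed by the low-order bits, so
   * the two choices stay largely independent. For example, assuming a hypothetical configuration
   * with 4 segments (segmentShift == 30, segmentMask == 3) and a 16-bucket segment table:
   *
   *   int hash = rehash(keyEquivalence.hash(key));
   *   int segmentIndex = (hash >>> 30) & 3; // which of segments[0..3], from the top bits
   *   int bucketIndex = hash & (16 - 1);    // bucket within that segment, from the low bits
   */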
1873
1874  Segment<K, V> createSegment(
1875      int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) {
1876    return new Segment<K, V>(this, initialCapacity, maxSegmentWeight, statsCounter);
1877  }
1878
1879  /**
1880   * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
   * loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to
   * clean up stale entries. As such it should only be called outside a segment context, such as
   * during iteration.
1884   */
1885  @Nullable
1886  V getLiveValue(ReferenceEntry<K, V> entry, long now) {
1887    if (entry.getKey() == null) {
1888      return null;
1889    }
1890    V value = entry.getValueReference().get();
1891    if (value == null) {
1892      return null;
1893    }
1894
1895    if (isExpired(entry, now)) {
1896      return null;
1897    }
1898    return value;
1899  }
1900
1901  // expiration
1902
1903  /**
1904   * Returns true if the entry has expired.
1905   */
1906  boolean isExpired(ReferenceEntry<K, V> entry, long now) {
1907    checkNotNull(entry);
1908    if (expiresAfterAccess()
1909        && (now - entry.getAccessTime() >= expireAfterAccessNanos)) {
1910      return true;
1911    }
1912    if (expiresAfterWrite()
1913        && (now - entry.getWriteTime() >= expireAfterWriteNanos)) {
1914      return true;
1915    }
1916    return false;
1917  }
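
  /*
   * Illustrative sketch (not part of the cache): expiration is a plain elapsed-nanoseconds
   * comparison against the ticker. For example, assuming expireAfterWrite was configured to
   * 10 seconds:
   *
   *   long expireAfterWriteNanos = TimeUnit.SECONDS.toNanos(10);
   *   long now = ticker.read();
   *   boolean expired = now - entry.getWriteTime() >= expireAfterWriteNanos;
   */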
1918
1919  // queues
1920
1921  // Guarded By Segment.this
1922  static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1923    previous.setNextInAccessQueue(next);
1924    next.setPreviousInAccessQueue(previous);
1925  }
1926
1927  // Guarded By Segment.this
1928  static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) {
1929    ReferenceEntry<K, V> nullEntry = nullEntry();
1930    nulled.setNextInAccessQueue(nullEntry);
1931    nulled.setPreviousInAccessQueue(nullEntry);
1932  }
1933
1934  // Guarded By Segment.this
1935  static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1936    previous.setNextInWriteQueue(next);
1937    next.setPreviousInWriteQueue(previous);
1938  }
1939
1940  // Guarded By Segment.this
1941  static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) {
1942    ReferenceEntry<K, V> nullEntry = nullEntry();
1943    nulled.setNextInWriteQueue(nullEntry);
1944    nulled.setPreviousInWriteQueue(nullEntry);
1945  }
1946
1947  /**
1948   * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
1949   * or eligibility for garbage collection. This should be called every time expireEntries or
1950   * evictEntry is called (once the lock is released).
1951   */
1952  void processPendingNotifications() {
1953    RemovalNotification<K, V> notification;
1954    while ((notification = removalNotificationQueue.poll()) != null) {
1955      try {
1956        removalListener.onRemoval(notification);
1957      } catch (Throwable e) {
1958        logger.log(Level.WARNING, "Exception thrown by removal listener", e);
1959      }
1960    }
1961  }
1962
1963  @SuppressWarnings("unchecked")
1964  final Segment<K, V>[] newSegmentArray(int ssize) {
1965    return new Segment[ssize];
1966  }
1967
1968  // Inner Classes
1969
1970  /**
1971   * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
1972   * opportunistically, just to simplify some locking and avoid separate construction.
1973   */
1974  @SuppressWarnings("serial") // This class is never serialized.
1975  static class Segment<K, V> extends ReentrantLock {
1976
1977    /*
1978     * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class.
1979     * It will require more memory but will reduce indirection.
1980     */
1981
1982    /*
1983     * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can
1984     * be read without locking. Next fields of nodes are immutable (final). All list additions are
1985     * performed at the front of each bin. This makes it easy to check changes, and also fast to
1986     * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
1987     * works well for hash tables since the bin lists tend to be short. (The average length is less
1988     * than two.)
1989     *
1990     * Read operations can thus proceed without locking, but rely on selected uses of volatiles to
1991     * ensure that completed write operations performed by other threads are noticed. For most
1992     * purposes, the "count" field, tracking the number of elements, serves as that volatile
1993     * variable ensuring visibility. This is convenient because this field needs to be read in many
1994     * read operations anyway:
1995     *
1996     * - All (unsynchronized) read operations must first read the "count" field, and should not
1997     * look at table entries if it is 0.
1998     *
1999     * - All (synchronized) write operations should write to the "count" field after structurally
2000     * changing any bin. The operations must not take any action that could even momentarily
2001     * cause a concurrent read operation to see inconsistent data. This is made easier by the
2002     * nature of the read operations in Map. For example, no operation can reveal that the table
2003     * has grown but the threshold has not yet been updated, so there are no atomicity requirements
2004     * for this with respect to reads.
2005     *
2006     * As a guide, all critical volatile reads and writes to the count field are marked in code
2007     * comments.
2008     */
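
    /*
     * Illustrative sketch (not part of the cache): the happens-before edge relied on by unlocked
     * reads is the volatile "count" field. A writer publishes a new entry roughly as
     *
     *   table.set(index, newEntry); // structural change, under the segment lock
     *   this.count = newCount;      // volatile write performed last
     *
     * and an unlocked reader observes it as
     *
     *   if (count != 0) {           // volatile read performed first
     *     ReferenceEntry<K, V> e = table.get(hash & (table.length() - 1));
     *     ...
     *   }
     */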
2009
2010    final LocalCache<K, V> map;
2011
2012    /**
2013     * The number of live elements in this segment's region.
2014     */
2015    volatile int count;
2016
2017    /**
2018     * The weight of the live elements in this segment's region.
2019     */
2020    @GuardedBy("this")
2021    long totalWeight;
2022
2023    /**
2024     * Number of updates that alter the size of the table. This is used during bulk-read methods to
2025     * make sure they see a consistent snapshot: If modCounts change during a traversal of segments
     * computing size or checking containsValue, then we might have an inconsistent view of state,
     * so we (usually) must retry.
2028     */
2029    int modCount;
2030
2031    /**
2032     * The table is expanded when its size exceeds this threshold. (The value of this field is
2033     * always {@code (int) (capacity * 0.75)}.)
2034     */
2035    int threshold;
2036
2037    /**
2038     * The per-segment table.
2039     */
2040    volatile AtomicReferenceArray<ReferenceEntry<K, V>> table;
2041
2042    /**
2043     * The maximum weight of this segment. UNSET_INT if there is no maximum.
2044     */
2045    final long maxSegmentWeight;
2046
2047    /**
2048     * The key reference queue contains entries whose keys have been garbage collected, and which
2049     * need to be cleaned up internally.
2050     */
2051    final ReferenceQueue<K> keyReferenceQueue;
2052
2053    /**
2054     * The value reference queue contains value references whose values have been garbage collected,
2055     * and which need to be cleaned up internally.
2056     */
2057    final ReferenceQueue<V> valueReferenceQueue;
2058
2059    /**
2060     * The recency queue is used to record which entries were accessed for updating the access
2061     * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is
2062     * crossed or a write occurs on the segment.
2063     */
2064    final Queue<ReferenceEntry<K, V>> recencyQueue;
2065
2066    /**
2067     * A counter of the number of reads since the last write, used to drain queues on a small
2068     * fraction of read operations.
2069     */
2070    final AtomicInteger readCount = new AtomicInteger();
2071
2072    /**
2073     * A queue of elements currently in the map, ordered by write time. Elements are added to the
2074     * tail of the queue on write.
2075     */
2076    @GuardedBy("this")
2077    final Queue<ReferenceEntry<K, V>> writeQueue;
2078
2079    /**
2080     * A queue of elements currently in the map, ordered by access time. Elements are added to the
2081     * tail of the queue on access (note that writes count as accesses).
2082     */
2083    @GuardedBy("this")
2084    final Queue<ReferenceEntry<K, V>> accessQueue;
2085
2086    /** Accumulates cache statistics. */
2087    final StatsCounter statsCounter;
2088
2089    Segment(LocalCache<K, V> map, int initialCapacity, long maxSegmentWeight,
2090        StatsCounter statsCounter) {
2091      this.map = map;
2092      this.maxSegmentWeight = maxSegmentWeight;
2093      this.statsCounter = checkNotNull(statsCounter);
2094      initTable(newEntryArray(initialCapacity));
2095
2096      keyReferenceQueue = map.usesKeyReferences()
2097           ? new ReferenceQueue<K>() : null;
2098
2099      valueReferenceQueue = map.usesValueReferences()
2100           ? new ReferenceQueue<V>() : null;
2101
2102      recencyQueue = map.usesAccessQueue()
2103          ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>()
2104          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2105
2106      writeQueue = map.usesWriteQueue()
2107          ? new WriteQueue<K, V>()
2108          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2109
2110      accessQueue = map.usesAccessQueue()
2111          ? new AccessQueue<K, V>()
2112          : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2113    }
2114
2115    AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) {
2116      return new AtomicReferenceArray<ReferenceEntry<K, V>>(size);
2117    }
2118
2119    void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) {
2120      this.threshold = newTable.length() * 3 / 4; // 0.75
2121      if (!map.customWeigher() && this.threshold == maxSegmentWeight) {
2122        // prevent spurious expansion before eviction
2123        this.threshold++;
2124      }
2125      this.table = newTable;
2126    }
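
    /*
     * Illustrative sketch (not part of the cache): for a 16-bucket table the threshold is
     * 16 * 3 / 4 == 12. For a size-bounded cache without a custom weigher, maxSegmentWeight is a
     * per-segment entry count; if it happened to equal the threshold (say both were 12), the
     * table would expand on the same insertion that eviction is about to undo, so the threshold
     * is nudged to 13 to skip that wasted resize.
     */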
2127
2128    @GuardedBy("this")
2129    ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
2130      return map.entryFactory.newEntry(this, checkNotNull(key), hash, next);
2131    }
2132
2133    /**
2134     * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry,
2135     * or {@code null} if {@code original} was already garbage collected.
2136     */
2137    @GuardedBy("this")
2138    ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
2139      if (original.getKey() == null) {
2140        // key collected
2141        return null;
2142      }
2143
2144      ValueReference<K, V> valueReference = original.getValueReference();
2145      V value = valueReference.get();
2146      if ((value == null) && valueReference.isActive()) {
2147        // value collected
2148        return null;
2149      }
2150
2151      ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext);
2152      newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry));
2153      return newEntry;
2154    }
2155
2156    /**
2157     * Sets a new value of an entry. Adds newly created entries at the end of the access queue.
2158     */
2159    @GuardedBy("this")
2160    void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) {
2161      ValueReference<K, V> previous = entry.getValueReference();
2162      int weight = map.weigher.weigh(key, value);
2163      checkState(weight >= 0, "Weights must be non-negative");
2164
2165      ValueReference<K, V> valueReference =
2166          map.valueStrength.referenceValue(this, entry, value, weight);
2167      entry.setValueReference(valueReference);
2168      recordWrite(entry, weight, now);
2169      previous.notifyNewValue(value);
2170    }
2171
2172    // loading
2173
2174    V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException {
2175      checkNotNull(key);
2176      checkNotNull(loader);
2177      try {
2178        if (count != 0) { // read-volatile
2179          // don't call getLiveEntry, which would ignore loading values
2180          ReferenceEntry<K, V> e = getEntry(key, hash);
2181          if (e != null) {
2182            long now = map.ticker.read();
2183            V value = getLiveValue(e, now);
2184            if (value != null) {
2185              recordRead(e, now);
2186              statsCounter.recordHits(1);
2187              return scheduleRefresh(e, key, hash, value, now, loader);
2188            }
2189            ValueReference<K, V> valueReference = e.getValueReference();
2190            if (valueReference.isLoading()) {
2191              return waitForLoadingValue(e, key, valueReference);
2192            }
2193          }
2194        }
2195
        // at this point e is either null, or its value has been collected or has expired
2197        return lockedGetOrLoad(key, hash, loader);
2198      } catch (ExecutionException ee) {
2199        Throwable cause = ee.getCause();
2200        if (cause instanceof Error) {
2201          throw new ExecutionError((Error) cause);
2202        } else if (cause instanceof RuntimeException) {
2203          throw new UncheckedExecutionException(cause);
2204        }
2205        throw ee;
2206      } finally {
2207        postReadCleanup();
2208      }
2209    }
2210
2211    V lockedGetOrLoad(K key, int hash, CacheLoader<? super K, V> loader)
2212        throws ExecutionException {
2213      ReferenceEntry<K, V> e;
2214      ValueReference<K, V> valueReference = null;
2215      LoadingValueReference<K, V> loadingValueReference = null;
2216      boolean createNewEntry = true;
2217
2218      lock();
2219      try {
2220        // re-read ticker once inside the lock
2221        long now = map.ticker.read();
2222        preWriteCleanup(now);
2223
2224        int newCount = this.count - 1;
2225        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2226        int index = hash & (table.length() - 1);
2227        ReferenceEntry<K, V> first = table.get(index);
2228
2229        for (e = first; e != null; e = e.getNext()) {
2230          K entryKey = e.getKey();
2231          if (e.getHash() == hash && entryKey != null
2232              && map.keyEquivalence.equivalent(key, entryKey)) {
2233            valueReference = e.getValueReference();
2234            if (valueReference.isLoading()) {
2235              createNewEntry = false;
2236            } else {
2237              V value = valueReference.get();
2238              if (value == null) {
2239                enqueueNotification(entryKey, hash, valueReference, RemovalCause.COLLECTED);
2240              } else if (map.isExpired(e, now)) {
                // This is a duplicate check, as preWriteCleanup already purged expired
                // entries, but let's accommodate an incorrect expiration queue.
2243                enqueueNotification(entryKey, hash, valueReference, RemovalCause.EXPIRED);
2244              } else {
2245                recordLockedRead(e, now);
2246                statsCounter.recordHits(1);
2247                // we were concurrent with loading; don't consider refresh
2248                return value;
2249              }
2250
2251              // immediately reuse invalid entries
2252              writeQueue.remove(e);
2253              accessQueue.remove(e);
2254              this.count = newCount; // write-volatile
2255            }
2256            break;
2257          }
2258        }
2259
2260        if (createNewEntry) {
2261          loadingValueReference = new LoadingValueReference<K, V>();
2262
2263          if (e == null) {
2264            e = newEntry(key, hash, first);
2265            e.setValueReference(loadingValueReference);
2266            table.set(index, e);
2267          } else {
2268            e.setValueReference(loadingValueReference);
2269          }
2270        }
2271      } finally {
2272        unlock();
2273        postWriteCleanup();
2274      }
2275
2276      if (createNewEntry) {
2277        try {
2278          // Synchronizes on the entry to allow failing fast when a recursive load is
2279          // detected. This may be circumvented when an entry is copied, but will fail fast most
2280          // of the time.
2281          synchronized (e) {
2282            return loadSync(key, hash, loadingValueReference, loader);
2283          }
2284        } finally {
2285          statsCounter.recordMisses(1);
2286        }
2287      } else {
2288        // The entry already exists. Wait for loading.
2289        return waitForLoadingValue(e, key, valueReference);
2290      }
2291    }
2292
2293    V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference)
2294        throws ExecutionException {
2295      if (!valueReference.isLoading()) {
2296        throw new AssertionError();
2297      }
2298
2299      checkState(!Thread.holdsLock(e), "Recursive load of: %s", key);
2300      // don't consider expiration as we're concurrent with loading
2301      try {
2302        V value = valueReference.waitForValue();
2303        if (value == null) {
2304          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2305        }
2306        // re-read ticker now that loading has completed
2307        long now = map.ticker.read();
2308        recordRead(e, now);
2309        return value;
2310      } finally {
2311        statsCounter.recordMisses(1);
2312      }
2313    }
2314
2315    // at most one of loadSync/loadAsync may be called for any given LoadingValueReference
2316
2317    V loadSync(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2318        CacheLoader<? super K, V> loader) throws ExecutionException {
2319      ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2320      return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2321    }
2322
2323    ListenableFuture<V> loadAsync(final K key, final int hash,
2324        final LoadingValueReference<K, V> loadingValueReference, CacheLoader<? super K, V> loader) {
2325      final ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2326      loadingFuture.addListener(
2327          new Runnable() {
2328            @Override
2329            public void run() {
2330              try {
                // getAndRecordStats stores the loaded value; its return value is not needed here
                getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2332              } catch (Throwable t) {
2333                logger.log(Level.WARNING, "Exception thrown during refresh", t);
2334                loadingValueReference.setException(t);
2335              }
2336            }
2337          }, directExecutor());
2338      return loadingFuture;
2339    }
2340
2341    /**
2342     * Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats.
2343     */
2344    V getAndRecordStats(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2345        ListenableFuture<V> newValue) throws ExecutionException {
2346      V value = null;
2347      try {
2348        value = getUninterruptibly(newValue);
2349        if (value == null) {
2350          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2351        }
2352        statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos());
2353        storeLoadedValue(key, hash, loadingValueReference, value);
2354        return value;
2355      } finally {
2356        if (value == null) {
2357          statsCounter.recordLoadException(loadingValueReference.elapsedNanos());
2358          removeLoadingValue(key, hash, loadingValueReference);
2359        }
2360      }
2361    }
2362
2363    V scheduleRefresh(ReferenceEntry<K, V> entry, K key, int hash, V oldValue, long now,
2364        CacheLoader<? super K, V> loader) {
2365      if (map.refreshes() && (now - entry.getWriteTime() > map.refreshNanos)
2366          && !entry.getValueReference().isLoading()) {
2367        V newValue = refresh(key, hash, loader, true);
2368        if (newValue != null) {
2369          return newValue;
2370        }
2371      }
2372      return oldValue;
2373    }
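
    /*
     * Illustrative sketch (not part of the cache): with refreshAfterWrite, reads of a stale (but
     * not yet expired) entry keep returning the old value while at most one thread reloads it;
     * only a thread whose reload happens to complete inline sees the new value immediately.
     *
     *   V returned = scheduleRefresh(entry, key, hash, oldValue, now, loader);
     *   // returned == oldValue unless this thread's refresh(...) finished synchronously
     */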
2374
2375    /**
2376     * Refreshes the value associated with {@code key}, unless another thread is already doing so.
2377     * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or
2378     * {@code null} if another thread is performing the refresh or if an error occurs during
2379     * refresh.
2380     */
2381    @Nullable
2382    V refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime) {
2383      final LoadingValueReference<K, V> loadingValueReference =
2384          insertLoadingValueReference(key, hash, checkTime);
2385      if (loadingValueReference == null) {
2386        return null;
2387      }
2388
2389      ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader);
2390      if (result.isDone()) {
2391        try {
2392          return Uninterruptibles.getUninterruptibly(result);
2393        } catch (Throwable t) {
2394          // don't let refresh exceptions propagate; error was already logged
2395        }
2396      }
2397      return null;
2398    }
2399
2400    /**
2401     * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference
2402     * is already loading.
2403     */
2404    @Nullable
2405    LoadingValueReference<K, V> insertLoadingValueReference(final K key, final int hash,
2406        boolean checkTime) {
2407      ReferenceEntry<K, V> e = null;
2408      lock();
2409      try {
2410        long now = map.ticker.read();
2411        preWriteCleanup(now);
2412
2413        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2414        int index = hash & (table.length() - 1);
2415        ReferenceEntry<K, V> first = table.get(index);
2416
2417        // Look for an existing entry.
2418        for (e = first; e != null; e = e.getNext()) {
2419          K entryKey = e.getKey();
2420          if (e.getHash() == hash && entryKey != null
2421              && map.keyEquivalence.equivalent(key, entryKey)) {
2422            // We found an existing entry.
2423
2424            ValueReference<K, V> valueReference = e.getValueReference();
2425            if (valueReference.isLoading()
2426                || (checkTime && (now - e.getWriteTime() < map.refreshNanos))) {
2427              // refresh is a no-op if loading is pending
2428              // if checkTime, we want to check *after* acquiring the lock if refresh still needs
2429              // to be scheduled
2430              return null;
2431            }
2432
2433            // continue returning old value while loading
2434            ++modCount;
2435            LoadingValueReference<K, V> loadingValueReference =
2436                new LoadingValueReference<K, V>(valueReference);
2437            e.setValueReference(loadingValueReference);
2438            return loadingValueReference;
2439          }
2440        }
2441
2442        ++modCount;
2443        LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<K, V>();
2444        e = newEntry(key, hash, first);
2445        e.setValueReference(loadingValueReference);
2446        table.set(index, e);
2447        return loadingValueReference;
2448      } finally {
2449        unlock();
2450        postWriteCleanup();
2451      }
2452    }
2453
2454    // reference queues, for garbage collection cleanup
2455
2456    /**
     * Cleans up collected entries when the lock is available.
2458     */
2459    void tryDrainReferenceQueues() {
2460      if (tryLock()) {
2461        try {
2462          drainReferenceQueues();
2463        } finally {
2464          unlock();
2465        }
2466      }
2467    }
2468
2469    /**
2470     * Drain the key and value reference queues, cleaning up internal entries containing garbage
2471     * collected keys or values.
2472     */
2473    @GuardedBy("this")
2474    void drainReferenceQueues() {
2475      if (map.usesKeyReferences()) {
2476        drainKeyReferenceQueue();
2477      }
2478      if (map.usesValueReferences()) {
2479        drainValueReferenceQueue();
2480      }
2481    }
2482
2483    @GuardedBy("this")
2484    void drainKeyReferenceQueue() {
2485      Reference<? extends K> ref;
2486      int i = 0;
2487      while ((ref = keyReferenceQueue.poll()) != null) {
2488        @SuppressWarnings("unchecked")
2489        ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref;
2490        map.reclaimKey(entry);
2491        if (++i == DRAIN_MAX) {
2492          break;
2493        }
2494      }
2495    }
2496
2497    @GuardedBy("this")
2498    void drainValueReferenceQueue() {
2499      Reference<? extends V> ref;
2500      int i = 0;
2501      while ((ref = valueReferenceQueue.poll()) != null) {
2502        @SuppressWarnings("unchecked")
2503        ValueReference<K, V> valueReference = (ValueReference<K, V>) ref;
2504        map.reclaimValue(valueReference);
2505        if (++i == DRAIN_MAX) {
2506          break;
2507        }
2508      }
2509    }
2510
2511    /**
2512     * Clears all entries from the key and value reference queues.
2513     */
2514    void clearReferenceQueues() {
2515      if (map.usesKeyReferences()) {
2516        clearKeyReferenceQueue();
2517      }
2518      if (map.usesValueReferences()) {
2519        clearValueReferenceQueue();
2520      }
2521    }
2522
2523    void clearKeyReferenceQueue() {
2524      while (keyReferenceQueue.poll() != null) {}
2525    }
2526
2527    void clearValueReferenceQueue() {
2528      while (valueReferenceQueue.poll() != null) {}
2529    }
2530
2531    // recency queue, shared by expiration and eviction
2532
2533    /**
2534     * Records the relative order in which this read was performed by adding {@code entry} to the
2535     * recency queue. At write-time, or when the queue is full past the threshold, the queue will
2536     * be drained and the entries therein processed.
2537     *
2538     * <p>Note: locked reads should use {@link #recordLockedRead}.
2539     */
2540    void recordRead(ReferenceEntry<K, V> entry, long now) {
2541      if (map.recordsAccess()) {
2542        entry.setAccessTime(now);
2543      }
2544      recencyQueue.add(entry);
2545    }
2546
2547    /**
2548     * Updates the eviction metadata that {@code entry} was just read. This currently amounts to
2549     * adding {@code entry} to relevant eviction lists.
2550     *
2551     * <p>Note: this method should only be called under lock, as it directly manipulates the
2552     * eviction queues. Unlocked reads should use {@link #recordRead}.
2553     */
2554    @GuardedBy("this")
2555    void recordLockedRead(ReferenceEntry<K, V> entry, long now) {
2556      if (map.recordsAccess()) {
2557        entry.setAccessTime(now);
2558      }
2559      accessQueue.add(entry);
2560    }
2561
2562    /**
2563     * Updates eviction metadata that {@code entry} was just written. This currently amounts to
2564     * adding {@code entry} to relevant eviction lists.
2565     */
2566    @GuardedBy("this")
2567    void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) {
2568      // we are already under lock, so drain the recency queue immediately
2569      drainRecencyQueue();
2570      totalWeight += weight;
2571
2572      if (map.recordsAccess()) {
2573        entry.setAccessTime(now);
2574      }
2575      if (map.recordsWrite()) {
2576        entry.setWriteTime(now);
2577      }
2578      accessQueue.add(entry);
2579      writeQueue.add(entry);
2580    }
2581
2582    /**
2583     * Drains the recency queue, updating eviction metadata that the entries therein were read in
2584     * the specified relative order. This currently amounts to adding them to relevant eviction
2585     * lists (accounting for the fact that they could have been removed from the map since being
2586     * added to the recency queue).
2587     */
2588    @GuardedBy("this")
2589    void drainRecencyQueue() {
2590      ReferenceEntry<K, V> e;
2591      while ((e = recencyQueue.poll()) != null) {
        // An entry may be in the recency queue despite it being removed from
        // the map. This can occur when the entry was concurrently read while a
        // writer was removing it from the segment or after a clear has removed
2595        // all of the segment's entries.
2596        if (accessQueue.contains(e)) {
2597          accessQueue.add(e);
2598        }
2599      }
2600    }
2601
2602    // expiration
2603
2604    /**
     * Cleans up expired entries when the lock is available.
2606     */
2607    void tryExpireEntries(long now) {
2608      if (tryLock()) {
2609        try {
2610          expireEntries(now);
2611        } finally {
2612          unlock();
2613          // don't call postWriteCleanup as we're in a read
2614        }
2615      }
2616    }
2617
2618    @GuardedBy("this")
2619    void expireEntries(long now) {
2620      drainRecencyQueue();
2621
2622      ReferenceEntry<K, V> e;
2623      while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) {
2624        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2625          throw new AssertionError();
2626        }
2627      }
2628      while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) {
2629        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2630          throw new AssertionError();
2631        }
2632      }
2633    }
2634
2635    // eviction
2636
2637    @GuardedBy("this")
2638    void enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause) {
2639      enqueueNotification(entry.getKey(), entry.getHash(), entry.getValueReference(), cause);
2640    }
2641
2642    @GuardedBy("this")
2643    void enqueueNotification(@Nullable K key, int hash, ValueReference<K, V> valueReference,
2644        RemovalCause cause) {
2645      totalWeight -= valueReference.getWeight();
2646      if (cause.wasEvicted()) {
2647        statsCounter.recordEviction();
2648      }
2649      if (map.removalNotificationQueue != DISCARDING_QUEUE) {
2650        V value = valueReference.get();
2651        RemovalNotification<K, V> notification = new RemovalNotification<K, V>(key, value, cause);
2652        map.removalNotificationQueue.offer(notification);
2653      }
2654    }
2655
2656    /**
2657     * Performs eviction if the segment is full. This should only be called prior to adding a new
2658     * entry and increasing {@code count}.
2659     */
2660    @GuardedBy("this")
2661    void evictEntries() {
2662      if (!map.evictsBySize()) {
2663        return;
2664      }
2665
2666      drainRecencyQueue();
2667      while (totalWeight > maxSegmentWeight) {
2668        ReferenceEntry<K, V> e = getNextEvictable();
2669        if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) {
2670          throw new AssertionError();
2671        }
2672      }
2673    }
2674
2675    // TODO(fry): instead implement this with an eviction head
2676    @GuardedBy("this")
2677    ReferenceEntry<K, V> getNextEvictable() {
2678      for (ReferenceEntry<K, V> e : accessQueue) {
2679        int weight = e.getValueReference().getWeight();
2680        if (weight > 0) {
2681          return e;
2682        }
2683      }
2684      throw new AssertionError();
2685    }
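
    /*
     * Illustrative sketch (not part of the cache): accessQueue is ordered from least to most
     * recently used, so eviction walks from its head; entries whose weight is zero are skipped,
     * which in effect lets a weigher exempt individual values from size-based eviction. For
     * example, with a hypothetical weigher such as
     *
     *   Weigher<String, String> byLength = new Weigher<String, String>() {
     *     @Override public int weigh(String key, String value) {
     *       return value.length(); // empty values weigh 0 and are never chosen for eviction
     *     }
     *   };
     *
     * evictEntries() removes the least-recently-accessed non-empty values until totalWeight drops
     * back to maxSegmentWeight or below.
     */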
2686
2687    /**
2688     * Returns first entry of bin for given hash.
2689     */
2690    ReferenceEntry<K, V> getFirst(int hash) {
2691      // read this volatile field only once
2692      AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2693      return table.get(hash & (table.length() - 1));
2694    }
2695
2696    // Specialized implementations of map methods
2697
2698    @Nullable
2699    ReferenceEntry<K, V> getEntry(Object key, int hash) {
2700      for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) {
2701        if (e.getHash() != hash) {
2702          continue;
2703        }
2704
2705        K entryKey = e.getKey();
2706        if (entryKey == null) {
2707          tryDrainReferenceQueues();
2708          continue;
2709        }
2710
2711        if (map.keyEquivalence.equivalent(key, entryKey)) {
2712          return e;
2713        }
2714      }
2715
2716      return null;
2717    }
2718
2719    @Nullable
2720    ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) {
2721      ReferenceEntry<K, V> e = getEntry(key, hash);
2722      if (e == null) {
2723        return null;
2724      } else if (map.isExpired(e, now)) {
2725        tryExpireEntries(now);
2726        return null;
2727      }
2728      return e;
2729    }
2730
2731    /**
2732     * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
2733     * loading, or expired.
2734     */
2735    V getLiveValue(ReferenceEntry<K, V> entry, long now) {
2736      if (entry.getKey() == null) {
2737        tryDrainReferenceQueues();
2738        return null;
2739      }
2740      V value = entry.getValueReference().get();
2741      if (value == null) {
2742        tryDrainReferenceQueues();
2743        return null;
2744      }
2745
2746      if (map.isExpired(entry, now)) {
2747        tryExpireEntries(now);
2748        return null;
2749      }
2750      return value;
2751    }
2752
2753    @Nullable
2754    V get(Object key, int hash) {
2755      try {
2756        if (count != 0) { // read-volatile
2757          long now = map.ticker.read();
2758          ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2759          if (e == null) {
2760            return null;
2761          }
2762
2763          V value = e.getValueReference().get();
2764          if (value != null) {
2765            recordRead(e, now);
2766            return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader);
2767          }
2768          tryDrainReferenceQueues();
2769        }
2770        return null;
2771      } finally {
2772        postReadCleanup();
2773      }
2774    }
2775
2776    boolean containsKey(Object key, int hash) {
2777      try {
2778        if (count != 0) { // read-volatile
2779          long now = map.ticker.read();
2780          ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2781          if (e == null) {
2782            return false;
2783          }
2784          return e.getValueReference().get() != null;
2785        }
2786
2787        return false;
2788      } finally {
2789        postReadCleanup();
2790      }
2791    }
2792
2793    /**
2794     * This method is a convenience for testing. Code should call {@link
2795     * LocalCache#containsValue} directly.
2796     */
2797    @VisibleForTesting
2798    boolean containsValue(Object value) {
2799      try {
2800        if (count != 0) { // read-volatile
2801          long now = map.ticker.read();
2802          AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2803          int length = table.length();
2804          for (int i = 0; i < length; ++i) {
2805            for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
2806              V entryValue = getLiveValue(e, now);
2807              if (entryValue == null) {
2808                continue;
2809              }
2810              if (map.valueEquivalence.equivalent(value, entryValue)) {
2811                return true;
2812              }
2813            }
2814          }
2815        }
2816
2817        return false;
2818      } finally {
2819        postReadCleanup();
2820      }
2821    }
2822
2823    @Nullable
2824    V put(K key, int hash, V value, boolean onlyIfAbsent) {
2825      lock();
2826      try {
2827        long now = map.ticker.read();
2828        preWriteCleanup(now);
2829
2830        int newCount = this.count + 1;
2831        if (newCount > this.threshold) { // ensure capacity
2832          expand();
2833          newCount = this.count + 1;
2834        }
2835
2836        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2837        int index = hash & (table.length() - 1);
2838        ReferenceEntry<K, V> first = table.get(index);
2839
2840        // Look for an existing entry.
2841        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
2842          K entryKey = e.getKey();
2843          if (e.getHash() == hash && entryKey != null
2844              && map.keyEquivalence.equivalent(key, entryKey)) {
2845            // We found an existing entry.
2846
2847            ValueReference<K, V> valueReference = e.getValueReference();
2848            V entryValue = valueReference.get();
2849
2850            if (entryValue == null) {
2851              ++modCount;
2852              if (valueReference.isActive()) {
2853                enqueueNotification(key, hash, valueReference, RemovalCause.COLLECTED);
2854                setValue(e, key, value, now);
2855                newCount = this.count; // count remains unchanged
2856              } else {
2857                setValue(e, key, value, now);
2858                newCount = this.count + 1;
2859              }
2860              this.count = newCount; // write-volatile
2861              evictEntries();
2862              return null;
2863            } else if (onlyIfAbsent) {
              // Mimic
              // "if (!map.containsKey(key)) ... else return map.get(key);"
2867              recordLockedRead(e, now);
2868              return entryValue;
2869            } else {
2870              // clobber existing entry, count remains unchanged
2871              ++modCount;
2872              enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
2873              setValue(e, key, value, now);
2874              evictEntries();
2875              return entryValue;
2876            }
2877          }
2878        }
2879
2880        // Create a new entry.
2881        ++modCount;
2882        ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
2883        setValue(newEntry, key, value, now);
2884        table.set(index, newEntry);
2885        newCount = this.count + 1;
2886        this.count = newCount; // write-volatile
2887        evictEntries();
2888        return null;
2889      } finally {
2890        unlock();
2891        postWriteCleanup();
2892      }
2893    }
2894
2895    /**
2896     * Expands the table if possible.
2897     */
2898    @GuardedBy("this")
2899    void expand() {
2900      AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table;
2901      int oldCapacity = oldTable.length();
2902      if (oldCapacity >= MAXIMUM_CAPACITY) {
2903        return;
2904      }
2905
2906      /*
2907       * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the
2908       * elements from each bin must either stay at same index, or move with a power of two offset.
2909       * We eliminate unnecessary node creation by catching cases where old nodes can be reused
2910       * because their next fields won't change. Statistically, at the default threshold, only
2911       * about one-sixth of them need cloning when a table doubles. The nodes they replace will be
2912       * garbage collectable as soon as they are no longer referenced by any reader thread that may
2913       * be in the midst of traversing table right now.
2914       */
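
      /*
       * Illustrative sketch (not part of the cache): suppose the table grows from 4 to 8 buckets,
       * so the new mask gains one bit (value 4). A chain in old bucket 1 splits between new
       * buckets 1 and 5 depending on that bit, e.g.
       *
       *   old bucket 1:  A(hash & 7 == 5) -> B(hash & 7 == 1) -> C(hash & 7 == 5) -> D(hash & 7 == 5)
       *
       * The trailing run C -> D already targets a single new bucket (5), so that suffix is reused
       * as-is; only A and B ahead of it are copied into their new buckets.
       */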
2915
2916      int newCount = count;
2917      AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1);
2918      threshold = newTable.length() * 3 / 4;
2919      int newMask = newTable.length() - 1;
2920      for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) {
2921        // We need to guarantee that any existing reads of old Map can
2922        // proceed. So we cannot yet null out each bin.
2923        ReferenceEntry<K, V> head = oldTable.get(oldIndex);
2924
2925        if (head != null) {
2926          ReferenceEntry<K, V> next = head.getNext();
2927          int headIndex = head.getHash() & newMask;
2928
2929          // Single node on list
2930          if (next == null) {
2931            newTable.set(headIndex, head);
2932          } else {
2933            // Reuse the consecutive sequence of nodes with the same target
2934            // index from the end of the list. tail points to the first
2935            // entry in the reusable list.
2936            ReferenceEntry<K, V> tail = head;
2937            int tailIndex = headIndex;
2938            for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) {
2939              int newIndex = e.getHash() & newMask;
2940              if (newIndex != tailIndex) {
2941                // The index changed. We'll need to copy the previous entry.
2942                tailIndex = newIndex;
2943                tail = e;
2944              }
2945            }
2946            newTable.set(tailIndex, tail);
2947
2948            // Clone nodes leading up to the tail.
2949            for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) {
2950              int newIndex = e.getHash() & newMask;
2951              ReferenceEntry<K, V> newNext = newTable.get(newIndex);
2952              ReferenceEntry<K, V> newFirst = copyEntry(e, newNext);
2953              if (newFirst != null) {
2954                newTable.set(newIndex, newFirst);
2955              } else {
2956                removeCollectedEntry(e);
2957                newCount--;
2958              }
2959            }
2960          }
2961        }
2962      }
2963      table = newTable;
2964      this.count = newCount;
2965    }
2966
2967    boolean replace(K key, int hash, V oldValue, V newValue) {
2968      lock();
2969      try {
2970        long now = map.ticker.read();
2971        preWriteCleanup(now);
2972
2973        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2974        int index = hash & (table.length() - 1);
2975        ReferenceEntry<K, V> first = table.get(index);
2976
2977        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
2978          K entryKey = e.getKey();
2979          if (e.getHash() == hash && entryKey != null
2980              && map.keyEquivalence.equivalent(key, entryKey)) {
2981            ValueReference<K, V> valueReference = e.getValueReference();
2982            V entryValue = valueReference.get();
2983            if (entryValue == null) {
2984              if (valueReference.isActive()) {
2985                // If the value disappeared, this entry is partially collected.
2986                int newCount = this.count - 1;
2987                ++modCount;
2988                ReferenceEntry<K, V> newFirst = removeValueFromChain(
2989                    first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
2990                newCount = this.count - 1;
2991                table.set(index, newFirst);
2992                this.count = newCount; // write-volatile
2993              }
2994              return false;
2995            }
2996
2997            if (map.valueEquivalence.equivalent(oldValue, entryValue)) {
2998              ++modCount;
2999              enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3000              setValue(e, key, newValue, now);
3001              evictEntries();
3002              return true;
3003            } else {
3004              // Mimic
3005              // "if (map.containsKey(key) && map.get(key).equals(oldValue))..."
3006              recordLockedRead(e, now);
3007              return false;
3008            }
3009          }
3010        }
3011
3012        return false;
3013      } finally {
3014        unlock();
3015        postWriteCleanup();
3016      }
3017    }
3018
3019    @Nullable
3020    V replace(K key, int hash, V newValue) {
3021      lock();
3022      try {
3023        long now = map.ticker.read();
3024        preWriteCleanup(now);
3025
3026        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3027        int index = hash & (table.length() - 1);
3028        ReferenceEntry<K, V> first = table.get(index);
3029
3030        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3031          K entryKey = e.getKey();
3032          if (e.getHash() == hash && entryKey != null
3033              && map.keyEquivalence.equivalent(key, entryKey)) {
3034            ValueReference<K, V> valueReference = e.getValueReference();
3035            V entryValue = valueReference.get();
3036            if (entryValue == null) {
3037              if (valueReference.isActive()) {
3038                // If the value disappeared, this entry is partially collected.
3039                int newCount = this.count - 1;
3040                ++modCount;
3041                ReferenceEntry<K, V> newFirst = removeValueFromChain(
3042                    first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3043                newCount = this.count - 1;
3044                table.set(index, newFirst);
3045                this.count = newCount; // write-volatile
3046              }
3047              return null;
3048            }
3049
3050            ++modCount;
3051            enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3052            setValue(e, key, newValue, now);
3053            evictEntries();
3054            return entryValue;
3055          }
3056        }
3057
3058        return null;
3059      } finally {
3060        unlock();
3061        postWriteCleanup();
3062      }
3063    }
3064
3065    @Nullable
3066    V remove(Object key, int hash) {
3067      lock();
3068      try {
3069        long now = map.ticker.read();
3070        preWriteCleanup(now);
3071
3072        int newCount = this.count - 1;
3073        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3074        int index = hash & (table.length() - 1);
3075        ReferenceEntry<K, V> first = table.get(index);
3076
3077        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3078          K entryKey = e.getKey();
3079          if (e.getHash() == hash && entryKey != null
3080              && map.keyEquivalence.equivalent(key, entryKey)) {
3081            ValueReference<K, V> valueReference = e.getValueReference();
3082            V entryValue = valueReference.get();
3083
3084            RemovalCause cause;
3085            if (entryValue != null) {
3086              cause = RemovalCause.EXPLICIT;
3087            } else if (valueReference.isActive()) {
3088              cause = RemovalCause.COLLECTED;
3089            } else {
3090              // currently loading
3091              return null;
3092            }
3093
3094            ++modCount;
3095            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3096                first, e, entryKey, hash, valueReference, cause);
3097            newCount = this.count - 1;
3098            table.set(index, newFirst);
3099            this.count = newCount; // write-volatile
3100            return entryValue;
3101          }
3102        }
3103
3104        return null;
3105      } finally {
3106        unlock();
3107        postWriteCleanup();
3108      }
3109    }
3110
3111    boolean storeLoadedValue(K key, int hash, LoadingValueReference<K, V> oldValueReference,
3112        V newValue) {
3113      lock();
3114      try {
3115        long now = map.ticker.read();
3116        preWriteCleanup(now);
3117
3118        int newCount = this.count + 1;
3119        if (newCount > this.threshold) { // ensure capacity
3120          expand();
3121          newCount = this.count + 1;
3122        }
3123
3124        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3125        int index = hash & (table.length() - 1);
3126        ReferenceEntry<K, V> first = table.get(index);
3127
3128        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3129          K entryKey = e.getKey();
3130          if (e.getHash() == hash && entryKey != null
3131              && map.keyEquivalence.equivalent(key, entryKey)) {
3132            ValueReference<K, V> valueReference = e.getValueReference();
3133            V entryValue = valueReference.get();
3134            // replace the old LoadingValueReference if it's live, otherwise
3135            // perform a putIfAbsent
3136            if (oldValueReference == valueReference
3137                || (entryValue == null && valueReference != UNSET)) {
3138              ++modCount;
3139              if (oldValueReference.isActive()) {
3140                RemovalCause cause =
3141                    (entryValue == null) ? RemovalCause.COLLECTED : RemovalCause.REPLACED;
3142                enqueueNotification(key, hash, oldValueReference, cause);
3143                newCount--;
3144              }
3145              setValue(e, key, newValue, now);
3146              this.count = newCount; // write-volatile
3147              evictEntries();
3148              return true;
3149            }
3150
3151            // the loaded value was already clobbered
3152            valueReference = new WeightedStrongValueReference<K, V>(newValue, 0);
3153            enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3154            return false;
3155          }
3156        }
3157
3158        ++modCount;
3159        ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
3160        setValue(newEntry, key, newValue, now);
3161        table.set(index, newEntry);
3162        this.count = newCount; // write-volatile
3163        evictEntries();
3164        return true;
3165      } finally {
3166        unlock();
3167        postWriteCleanup();
3168      }
3169    }
3170
3171    boolean remove(Object key, int hash, Object value) {
3172      lock();
3173      try {
3174        long now = map.ticker.read();
3175        preWriteCleanup(now);
3176
3177        int newCount = this.count - 1;
3178        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3179        int index = hash & (table.length() - 1);
3180        ReferenceEntry<K, V> first = table.get(index);
3181
3182        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3183          K entryKey = e.getKey();
3184          if (e.getHash() == hash && entryKey != null
3185              && map.keyEquivalence.equivalent(key, entryKey)) {
3186            ValueReference<K, V> valueReference = e.getValueReference();
3187            V entryValue = valueReference.get();
3188
3189            RemovalCause cause;
3190            if (map.valueEquivalence.equivalent(value, entryValue)) {
3191              cause = RemovalCause.EXPLICIT;
3192            } else if (entryValue == null && valueReference.isActive()) {
3193              cause = RemovalCause.COLLECTED;
3194            } else {
3195              // currently loading
3196              return false;
3197            }
3198
3199            ++modCount;
3200            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3201                first, e, entryKey, hash, valueReference, cause);
3202            newCount = this.count - 1;
3203            table.set(index, newFirst);
3204            this.count = newCount; // write-volatile
3205            return (cause == RemovalCause.EXPLICIT);
3206          }
3207        }
3208
3209        return false;
3210      } finally {
3211        unlock();
3212        postWriteCleanup();
3213      }
3214    }
3215
3216    void clear() {
3217      if (count != 0) { // read-volatile
3218        lock();
3219        try {
3220          AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3221          for (int i = 0; i < table.length(); ++i) {
3222            for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
3223              // Loading references aren't actually in the map yet.
3224              if (e.getValueReference().isActive()) {
3225                enqueueNotification(e, RemovalCause.EXPLICIT);
3226              }
3227            }
3228          }
3229          for (int i = 0; i < table.length(); ++i) {
3230            table.set(i, null);
3231          }
3232          clearReferenceQueues();
3233          writeQueue.clear();
3234          accessQueue.clear();
3235          readCount.set(0);
3236
3237          ++modCount;
3238          count = 0; // write-volatile
3239        } finally {
3240          unlock();
3241          postWriteCleanup();
3242        }
3243      }
3244    }
3245
3246    @GuardedBy("this")
3247    @Nullable
3248    ReferenceEntry<K, V> removeValueFromChain(ReferenceEntry<K, V> first,
3249        ReferenceEntry<K, V> entry, @Nullable K key, int hash, ValueReference<K, V> valueReference,
3250        RemovalCause cause) {
3251      enqueueNotification(key, hash, valueReference, cause);
3252      writeQueue.remove(entry);
3253      accessQueue.remove(entry);
3254
3255      if (valueReference.isLoading()) {
3256        valueReference.notifyNewValue(null);
3257        return first;
3258      } else {
3259        return removeEntryFromChain(first, entry);
3260      }
3261    }
3262
3263    @GuardedBy("this")
3264    @Nullable
3265    ReferenceEntry<K, V> removeEntryFromChain(ReferenceEntry<K, V> first,
3266        ReferenceEntry<K, V> entry) {
3267      int newCount = count;
3268      ReferenceEntry<K, V> newFirst = entry.getNext();
3269      for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) {
3270        ReferenceEntry<K, V> next = copyEntry(e, newFirst);
3271        if (next != null) {
3272          newFirst = next;
3273        } else {
3274          removeCollectedEntry(e);
3275          newCount--;
3276        }
3277      }
3278      this.count = newCount;
3279      return newFirst;
3280    }
3281
3282    @GuardedBy("this")
3283    void removeCollectedEntry(ReferenceEntry<K, V> entry) {
3284      enqueueNotification(entry, RemovalCause.COLLECTED);
3285      writeQueue.remove(entry);
3286      accessQueue.remove(entry);
3287    }
3288
3289    /**
3290     * Removes an entry whose key has been garbage collected.
3291     */
3292    boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) {
3293      lock();
3294      try {
3295        int newCount = count - 1;
3296        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3297        int index = hash & (table.length() - 1);
3298        ReferenceEntry<K, V> first = table.get(index);
3299
3300        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3301          if (e == entry) {
3302            ++modCount;
3303            ReferenceEntry<K, V> newFirst = removeValueFromChain(
3304                first, e, e.getKey(), hash, e.getValueReference(), RemovalCause.COLLECTED);
3305            newCount = this.count - 1;
3306            table.set(index, newFirst);
3307            this.count = newCount; // write-volatile
3308            return true;
3309          }
3310        }
3311
3312        return false;
3313      } finally {
3314        unlock();
3315        postWriteCleanup();
3316      }
3317    }
3318
3319    /**
3320     * Removes an entry whose value has been garbage collected.
3321     */
3322    boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) {
3323      lock();
3324      try {
3325        int newCount = this.count - 1;
3326        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3327        int index = hash & (table.length() - 1);
3328        ReferenceEntry<K, V> first = table.get(index);
3329
3330        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3331          K entryKey = e.getKey();
3332          if (e.getHash() == hash && entryKey != null
3333              && map.keyEquivalence.equivalent(key, entryKey)) {
3334            ValueReference<K, V> v = e.getValueReference();
3335            if (v == valueReference) {
3336              ++modCount;
3337              ReferenceEntry<K, V> newFirst = removeValueFromChain(
3338                  first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3339              newCount = this.count - 1;
3340              table.set(index, newFirst);
3341              this.count = newCount; // write-volatile
3342              return true;
3343            }
3344            return false;
3345          }
3346        }
3347
3348        return false;
3349      } finally {
3350        unlock();
3351        if (!isHeldByCurrentThread()) { // don't clean up inside of put
3352          postWriteCleanup();
3353        }
3354      }
3355    }
3356
3357    boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) {
3358      lock();
3359      try {
3360        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3361        int index = hash & (table.length() - 1);
3362        ReferenceEntry<K, V> first = table.get(index);
3363
3364        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3365          K entryKey = e.getKey();
3366          if (e.getHash() == hash && entryKey != null
3367              && map.keyEquivalence.equivalent(key, entryKey)) {
3368            ValueReference<K, V> v = e.getValueReference();
3369            if (v == valueReference) {
3370              if (valueReference.isActive()) {
3371                e.setValueReference(valueReference.getOldValue());
3372              } else {
3373                ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e);
3374                table.set(index, newFirst);
3375              }
3376              return true;
3377            }
3378            return false;
3379          }
3380        }
3381
3382        return false;
3383      } finally {
3384        unlock();
3385        postWriteCleanup();
3386      }
3387    }
3388
3389    @GuardedBy("this")
3390    boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) {
3391      int newCount = this.count - 1;
3392      AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3393      int index = hash & (table.length() - 1);
3394      ReferenceEntry<K, V> first = table.get(index);
3395
3396      for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3397        if (e == entry) {
3398          ++modCount;
3399          ReferenceEntry<K, V> newFirst = removeValueFromChain(
3400              first, e, e.getKey(), hash, e.getValueReference(), cause);
3401          newCount = this.count - 1;
3402          table.set(index, newFirst);
3403          this.count = newCount; // write-volatile
3404          return true;
3405        }
3406      }
3407
3408      return false;
3409    }
3410
3411    /**
3412     * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
3413     * is not observed after a sufficient number of reads, try cleaning up from the read thread.
3414     */
3415    void postReadCleanup() {
3416      if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
3417        cleanUp();
3418      }
3419    }
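
    /*
     * A minimal sketch of the masking trick used above, assuming DRAIN_THRESHOLD is a
     * power-of-two-minus-one mask (e.g. 0x3F): "(counter.incrementAndGet() & mask) == 0" is true
     * exactly once per (mask + 1) increments, so read-triggered cleanup runs periodically without
     * any extra bookkeeping. MASK and maybeCleanUp below are illustrative names only.
     *
     *   static final int MASK = 0x3F;
     *   final AtomicInteger reads = new AtomicInteger();
     *
     *   void maybeCleanUp() {
     *     if ((reads.incrementAndGet() & MASK) == 0) {
     *       cleanUp(); // runs once every MASK + 1 calls
     *     }
     *   }
     */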
3420
3421    /**
3422     * Performs routine cleanup prior to executing a write. This should be called every time a
3423     * write thread acquires the segment lock, immediately after acquiring the lock.
3424     *
3425     * <p>Post-condition: expireEntries has been run.
3426     */
3427    @GuardedBy("this")
3428    void preWriteCleanup(long now) {
3429      runLockedCleanup(now);
3430    }
3431
3432    /**
3433     * Performs routine cleanup following a write.
3434     */
3435    void postWriteCleanup() {
3436      runUnlockedCleanup();
3437    }
3438
3439    void cleanUp() {
3440      long now = map.ticker.read();
3441      runLockedCleanup(now);
3442      runUnlockedCleanup();
3443    }
3444
3445    void runLockedCleanup(long now) {
3446      if (tryLock()) {
3447        try {
3448          drainReferenceQueues();
3449          expireEntries(now); // calls drainRecencyQueue
3450          readCount.set(0);
3451        } finally {
3452          unlock();
3453        }
3454      }
3455    }
3456
3457    void runUnlockedCleanup() {
3458      // locked cleanup may generate notifications we can send unlocked
3459      if (!isHeldByCurrentThread()) {
3460        map.processPendingNotifications();
3461      }
3462    }
3463
3464  }
3465
3466  static class LoadingValueReference<K, V> implements ValueReference<K, V> {
3467    volatile ValueReference<K, V> oldValue;
3468
3469    // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture
3470    final SettableFuture<V> futureValue = SettableFuture.create();
3471    final Stopwatch stopwatch = Stopwatch.createUnstarted();
3472
3473    public LoadingValueReference() {
3474      this(LocalCache.<K, V>unset());
3475    }
3476
3477    public LoadingValueReference(ValueReference<K, V> oldValue) {
3478      this.oldValue = oldValue;
3479    }
3480
3481    @Override
3482    public boolean isLoading() {
3483      return true;
3484    }
3485
3486    @Override
3487    public boolean isActive() {
3488      return oldValue.isActive();
3489    }
3490
3491    @Override
3492    public int getWeight() {
3493      return oldValue.getWeight();
3494    }
3495
3496    public boolean set(@Nullable V newValue) {
3497      return futureValue.set(newValue);
3498    }
3499
3500    public boolean setException(Throwable t) {
3501      return futureValue.setException(t);
3502    }
3503
3504    private ListenableFuture<V> fullyFailedFuture(Throwable t) {
3505      return Futures.immediateFailedFuture(t);
3506    }
3507
3508    @Override
3509    public void notifyNewValue(@Nullable V newValue) {
3510      if (newValue != null) {
3511        // The pending load was clobbered by a manual write.
3512        // Unblock all pending gets, and have them return the new value.
3513        set(newValue);
3514      } else {
3515        // The pending load was removed. Delay notifications until loading completes.
3516        oldValue = unset();
3517      }
3518
3519      // TODO(fry): could also cancel loading if we had a handle on its future
3520    }
3521
3522    public ListenableFuture<V> loadFuture(K key, CacheLoader<? super K, V> loader) {
3523      stopwatch.start();
3524      V previousValue = oldValue.get();
3525      try {
3526        if (previousValue == null) {
3527          V newValue = loader.load(key);
3528          return set(newValue) ? futureValue : Futures.immediateFuture(newValue);
3529        }
3530        ListenableFuture<V> newValue = loader.reload(key, previousValue);
3531        if (newValue == null) {
3532          return Futures.immediateFuture(null);
3533        }
3534        // To avoid a race, make sure the refreshed value is set into loadingValueReference
3535        // *before* returning newValue from the cache query.
3536        return Futures.transform(newValue, new Function<V, V>() {
3537          @Override
3538          public V apply(V newValue) {
3539            LoadingValueReference.this.set(newValue);
3540            return newValue;
3541          }
3542        });
3543      } catch (Throwable t) {
3544        if (t instanceof InterruptedException) {
3545          Thread.currentThread().interrupt();
3546        }
3547        return setException(t) ? futureValue : fullyFailedFuture(t);
3548      }
3549    }
3550
3551    public long elapsedNanos() {
3552      return stopwatch.elapsed(NANOSECONDS);
3553    }
3554
3555    @Override
3556    public V waitForValue() throws ExecutionException {
3557      return getUninterruptibly(futureValue);
3558    }
3559
3560    @Override
3561    public V get() {
3562      return oldValue.get();
3563    }
3564
3565    public ValueReference<K, V> getOldValue() {
3566      return oldValue;
3567    }
3568
3569    @Override
3570    public ReferenceEntry<K, V> getEntry() {
3571      return null;
3572    }
3573
3574    @Override
3575    public ValueReference<K, V> copyFor(
3576        ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry) {
3577      return this;
3578    }
3579  }
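
  /*
   * loadFuture above uses CacheLoader.load for a first load and CacheLoader.reload when a
   * non-null previous value exists (the refresh path). A minimal sketch of a loader that takes
   * advantage of that split; expensiveLookup and backgroundExecutor are assumed helpers, not part
   * of this class:
   *
   *   final ListeningExecutorService backgroundExecutor =
   *       MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
   *
   *   CacheLoader<String, String> loader = new CacheLoader<String, String>() {
   *     @Override public String load(String key) throws Exception {
   *       return expensiveLookup(key); // synchronous first load
   *     }
   *     @Override public ListenableFuture<String> reload(final String key, String oldValue) {
   *       // refresh asynchronously; readers keep seeing oldValue until the future completes
   *       return backgroundExecutor.submit(new Callable<String>() {
   *         @Override public String call() throws Exception {
   *           return expensiveLookup(key);
   *         }
   *       });
   *     }
   *   };
   */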
3580
3581  // Queues
3582
3583  /**
3584   * A custom queue for managing eviction order. Note that this is tightly integrated with {@code
3585   * ReferenceEntry}, upon which it relies to perform its linking.
3586   *
3587   * <p>Note that this entire implementation makes the assumption that all elements which are in
3588   * the map are also in this queue, and that all elements not in the queue are not in the map.
3589   *
3590   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3591   * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3592   * for the current model.
3593   */
3594  static final class WriteQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3595    final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3596
3597      @Override
3598      public long getWriteTime() {
3599        return Long.MAX_VALUE;
3600      }
3601
3602      @Override
3603      public void setWriteTime(long time) {}
3604
3605      ReferenceEntry<K, V> nextWrite = this;
3606
3607      @Override
3608      public ReferenceEntry<K, V> getNextInWriteQueue() {
3609        return nextWrite;
3610      }
3611
3612      @Override
3613      public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
3614        this.nextWrite = next;
3615      }
3616
3617      ReferenceEntry<K, V> previousWrite = this;
3618
3619      @Override
3620      public ReferenceEntry<K, V> getPreviousInWriteQueue() {
3621        return previousWrite;
3622      }
3623
3624      @Override
3625      public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
3626        this.previousWrite = previous;
3627      }
3628    };
3629
3630    // implements Queue
3631
3632    @Override
3633    public boolean offer(ReferenceEntry<K, V> entry) {
3634      // unlink
3635      connectWriteOrder(entry.getPreviousInWriteQueue(), entry.getNextInWriteQueue());
3636
3637      // add to tail
3638      connectWriteOrder(head.getPreviousInWriteQueue(), entry);
3639      connectWriteOrder(entry, head);
3640
3641      return true;
3642    }
3643
3644    @Override
3645    public ReferenceEntry<K, V> peek() {
3646      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3647      return (next == head) ? null : next;
3648    }
3649
3650    @Override
3651    public ReferenceEntry<K, V> poll() {
3652      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3653      if (next == head) {
3654        return null;
3655      }
3656
3657      remove(next);
3658      return next;
3659    }
3660
3661    @Override
3662    @SuppressWarnings("unchecked")
3663    public boolean remove(Object o) {
3664      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3665      ReferenceEntry<K, V> previous = e.getPreviousInWriteQueue();
3666      ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3667      connectWriteOrder(previous, next);
3668      nullifyWriteOrder(e);
3669
3670      return next != NullEntry.INSTANCE;
3671    }
3672
3673    @Override
3674    @SuppressWarnings("unchecked")
3675    public boolean contains(Object o) {
3676      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3677      return e.getNextInWriteQueue() != NullEntry.INSTANCE;
3678    }
3679
3680    @Override
3681    public boolean isEmpty() {
3682      return head.getNextInWriteQueue() == head;
3683    }
3684
3685    @Override
3686    public int size() {
3687      int size = 0;
3688      for (ReferenceEntry<K, V> e = head.getNextInWriteQueue(); e != head;
3689          e = e.getNextInWriteQueue()) {
3690        size++;
3691      }
3692      return size;
3693    }
3694
3695    @Override
3696    public void clear() {
3697      ReferenceEntry<K, V> e = head.getNextInWriteQueue();
3698      while (e != head) {
3699        ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3700        nullifyWriteOrder(e);
3701        e = next;
3702      }
3703
3704      head.setNextInWriteQueue(head);
3705      head.setPreviousInWriteQueue(head);
3706    }
3707
3708    @Override
3709    public Iterator<ReferenceEntry<K, V>> iterator() {
3710      return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
3711        @Override
3712        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3713          ReferenceEntry<K, V> next = previous.getNextInWriteQueue();
3714          return (next == head) ? null : next;
3715        }
3716      };
3717    }
3718  }
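
  /*
   * Both WriteQueue above and AccessQueue below use the same pattern: a circular, intrusive,
   * doubly-linked list with a sentinel head, where detached entries link to the NullEntry
   * sentinel, which is what makes contains and remove O(1). A simplified sketch of the pattern
   * with a hypothetical standalone Node type, using self-links instead of a shared sentinel:
   *
   *   final class Node {
   *     Node prev = this, next = this; // a detached node links to itself
   *   }
   *
   *   void addLast(Node head, Node n) { // head is a sentinel; the list is empty when head.next == head
   *     n.prev = head.prev;
   *     n.next = head;
   *     head.prev.next = n;
   *     head.prev = n;
   *   }
   *
   *   void unlink(Node n) {
   *     n.prev.next = n.next;
   *     n.next.prev = n.prev;
   *     n.prev = n; // re-detach so a membership check stays a single pointer comparison
   *     n.next = n;
   *   }
   */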
3719
3720  /**
3721   * A custom queue for managing access order. Note that this is tightly integrated with
3722   * {@code ReferenceEntry}, upon which it relies to perform its linking.
3723   *
3724   * <p>Note that this entire implementation makes the assumption that all elements which are in
3725   * the map are also in this queue, and that all elements not in the queue are not in the map.
3726   *
3727   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3728   * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3729   * for the current model.
3730   */
3731  static final class AccessQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3732    final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3733
3734      @Override
3735      public long getAccessTime() {
3736        return Long.MAX_VALUE;
3737      }
3738
3739      @Override
3740      public void setAccessTime(long time) {}
3741
3742      ReferenceEntry<K, V> nextAccess = this;
3743
3744      @Override
3745      public ReferenceEntry<K, V> getNextInAccessQueue() {
3746        return nextAccess;
3747      }
3748
3749      @Override
3750      public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
3751        this.nextAccess = next;
3752      }
3753
3754      ReferenceEntry<K, V> previousAccess = this;
3755
3756      @Override
3757      public ReferenceEntry<K, V> getPreviousInAccessQueue() {
3758        return previousAccess;
3759      }
3760
3761      @Override
3762      public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
3763        this.previousAccess = previous;
3764      }
3765    };
3766
3767    // implements Queue
3768
3769    @Override
3770    public boolean offer(ReferenceEntry<K, V> entry) {
3771      // unlink
3772      connectAccessOrder(entry.getPreviousInAccessQueue(), entry.getNextInAccessQueue());
3773
3774      // add to tail
3775      connectAccessOrder(head.getPreviousInAccessQueue(), entry);
3776      connectAccessOrder(entry, head);
3777
3778      return true;
3779    }
3780
3781    @Override
3782    public ReferenceEntry<K, V> peek() {
3783      ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3784      return (next == head) ? null : next;
3785    }
3786
3787    @Override
3788    public ReferenceEntry<K, V> poll() {
3789      ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3790      if (next == head) {
3791        return null;
3792      }
3793
3794      remove(next);
3795      return next;
3796    }
3797
3798    @Override
3799    @SuppressWarnings("unchecked")
3800    public boolean remove(Object o) {
3801      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3802      ReferenceEntry<K, V> previous = e.getPreviousInAccessQueue();
3803      ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3804      connectAccessOrder(previous, next);
3805      nullifyAccessOrder(e);
3806
3807      return next != NullEntry.INSTANCE;
3808    }
3809
3810    @Override
3811    @SuppressWarnings("unchecked")
3812    public boolean contains(Object o) {
3813      ReferenceEntry<K, V> e = (ReferenceEntry) o;
3814      return e.getNextInAccessQueue() != NullEntry.INSTANCE;
3815    }
3816
3817    @Override
3818    public boolean isEmpty() {
3819      return head.getNextInAccessQueue() == head;
3820    }
3821
3822    @Override
3823    public int size() {
3824      int size = 0;
3825      for (ReferenceEntry<K, V> e = head.getNextInAccessQueue(); e != head;
3826          e = e.getNextInAccessQueue()) {
3827        size++;
3828      }
3829      return size;
3830    }
3831
3832    @Override
3833    public void clear() {
3834      ReferenceEntry<K, V> e = head.getNextInAccessQueue();
3835      while (e != head) {
3836        ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3837        nullifyAccessOrder(e);
3838        e = next;
3839      }
3840
3841      head.setNextInAccessQueue(head);
3842      head.setPreviousInAccessQueue(head);
3843    }
3844
3845    @Override
3846    public Iterator<ReferenceEntry<K, V>> iterator() {
3847      return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
3848        @Override
3849        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3850          ReferenceEntry<K, V> next = previous.getNextInAccessQueue();
3851          return (next == head) ? null : next;
3852        }
3853      };
3854    }
3855  }
3856
3857  // Cache support
3858
3859  public void cleanUp() {
3860    for (Segment<?, ?> segment : segments) {
3861      segment.cleanUp();
3862    }
3863  }
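
  /*
   * Maintenance (expiration, reference processing, eviction notifications) piggybacks on reads
   * and writes rather than running on a dedicated thread, so a cache that stops being written to
   * can hold expired entries until cleanUp is invoked. A minimal sketch of forcing it through the
   * public Cache API; the one-minute expiry and the entry are just assumptions:
   *
   *   Cache<String, String> cache = CacheBuilder.newBuilder()
   *       .expireAfterWrite(1, TimeUnit.MINUTES)
   *       .build();
   *   cache.put("k", "v");
   *   // ... later, e.g. from a scheduled task, run any pending maintenance:
   *   cache.cleanUp();
   */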
3864
3865  // ConcurrentMap methods
3866
3867  @Override
3868  public boolean isEmpty() {
3869    /*
3870     * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
3871     * removed in one segment while checking another, in which case the table was never actually
3872     * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
3873     * modifications before recheck.)  Method containsValue() uses similar constructions for
3874     * stability checks.
3875     */
3876    long sum = 0L;
3877    Segment<K, V>[] segments = this.segments;
3878    for (int i = 0; i < segments.length; ++i) {
3879      if (segments[i].count != 0) {
3880        return false;
3881      }
3882      sum += segments[i].modCount;
3883    }
3884
3885    if (sum != 0L) { // recheck unless no modifications
3886      for (int i = 0; i < segments.length; ++i) {
3887        if (segments[i].count != 0) {
3888          return false;
3889        }
3890        sum -= segments[i].modCount;
3891      }
3892      if (sum != 0L) {
3893        return false;
3894      }
3895    }
3896    return true;
3897  }
3898
3899  long longSize() {
3900    Segment<K, V>[] segments = this.segments;
3901    long sum = 0;
3902    for (int i = 0; i < segments.length; ++i) {
3903      sum += segments[i].count;
3904    }
3905    return sum;
3906  }
3907
3908  @Override
3909  public int size() {
3910    return Ints.saturatedCast(longSize());
3911  }
3912
3913  @Override
3914  @Nullable
3915  public V get(@Nullable Object key) {
3916    if (key == null) {
3917      return null;
3918    }
3919    int hash = hash(key);
3920    return segmentFor(hash).get(key, hash);
3921  }
3922
3923  @Nullable
3924  public V getIfPresent(Object key) {
3925    int hash = hash(checkNotNull(key));
3926    V value = segmentFor(hash).get(key, hash);
3927    if (value == null) {
3928      globalStatsCounter.recordMisses(1);
3929    } else {
3930      globalStatsCounter.recordHits(1);
3931    }
3932    return value;
3933  }
3934
3935  V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException {
3936    int hash = hash(checkNotNull(key));
3937    return segmentFor(hash).get(key, hash, loader);
3938  }
3939
3940  V getOrLoad(K key) throws ExecutionException {
3941    return get(key, defaultLoader);
3942  }
3943
3944  ImmutableMap<K, V> getAllPresent(Iterable<?> keys) {
3945    int hits = 0;
3946    int misses = 0;
3947
3948    Map<K, V> result = Maps.newLinkedHashMap();
3949    for (Object key : keys) {
3950      V value = get(key);
3951      if (value == null) {
3952        misses++;
3953      } else {
3954        // TODO(fry): store entry key instead of query key
3955        @SuppressWarnings("unchecked")
3956        K castKey = (K) key;
3957        result.put(castKey, value);
3958        hits++;
3959      }
3960    }
3961    globalStatsCounter.recordHits(hits);
3962    globalStatsCounter.recordMisses(misses);
3963    return ImmutableMap.copyOf(result);
3964  }
3965
3966  ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
3967    int hits = 0;
3968    int misses = 0;
3969
3970    Map<K, V> result = Maps.newLinkedHashMap();
3971    Set<K> keysToLoad = Sets.newLinkedHashSet();
3972    for (K key : keys) {
3973      V value = get(key);
3974      if (!result.containsKey(key)) {
3975        result.put(key, value);
3976        if (value == null) {
3977          misses++;
3978          keysToLoad.add(key);
3979        } else {
3980          hits++;
3981        }
3982      }
3983    }
3984
3985    try {
3986      if (!keysToLoad.isEmpty()) {
3987        try {
3988          Map<K, V> newEntries = loadAll(keysToLoad, defaultLoader);
3989          for (K key : keysToLoad) {
3990            V value = newEntries.get(key);
3991            if (value == null) {
3992              throw new InvalidCacheLoadException("loadAll failed to return a value for " + key);
3993            }
3994            result.put(key, value);
3995          }
3996        } catch (UnsupportedLoadingOperationException e) {
3997          // loadAll not implemented, fall back to load
3998          for (K key : keysToLoad) {
3999            misses--; // get will count this miss
4000            result.put(key, get(key, defaultLoader));
4001          }
4002        }
4003      }
4004      return ImmutableMap.copyOf(result);
4005    } finally {
4006      globalStatsCounter.recordHits(hits);
4007      globalStatsCounter.recordMisses(misses);
4008    }
4009  }
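
  /*
   * getAll above serves what it can from the cache, then asks the loader for the remaining keys
   * in a single loadAll call, falling back to per-key load when loadAll isn't overridden. A
   * minimal sketch of a loader that benefits from the bulk path; fetchFromDb and fetchAllFromDb
   * are hypothetical helpers:
   *
   *   LoadingCache<Long, String> cache = CacheBuilder.newBuilder().build(
   *       new CacheLoader<Long, String>() {
   *         @Override public String load(Long id) throws Exception {
   *           return fetchFromDb(id);
   *         }
   *         @Override public Map<Long, String> loadAll(Iterable<? extends Long> ids)
   *             throws Exception {
   *           return fetchAllFromDb(ids); // one round trip for all missing keys
   *         }
   *       });
   *
   *   ImmutableMap<Long, String> resolved = cache.getAll(ImmutableList.of(1L, 2L, 3L));
   */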
4010
4011  /**
4012   * Returns the result of calling {@link CacheLoader#loadAll}; if {@code loader} doesn't
4013   * implement {@code loadAll}, the resulting {@code UnsupportedLoadingOperationException} propagates to the caller.
4014   */
4015  @Nullable
4016  Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
4017      throws ExecutionException {
4018    checkNotNull(loader);
4019    checkNotNull(keys);
4020    Stopwatch stopwatch = Stopwatch.createStarted();
4021    Map<K, V> result;
4022    boolean success = false;
4023    try {
4024      @SuppressWarnings("unchecked") // safe since all keys extend K
4025      Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
4026      result = map;
4027      success = true;
4028    } catch (UnsupportedLoadingOperationException e) {
4029      success = true;
4030      throw e;
4031    } catch (InterruptedException e) {
4032      Thread.currentThread().interrupt();
4033      throw new ExecutionException(e);
4034    } catch (RuntimeException e) {
4035      throw new UncheckedExecutionException(e);
4036    } catch (Exception e) {
4037      throw new ExecutionException(e);
4038    } catch (Error e) {
4039      throw new ExecutionError(e);
4040    } finally {
4041      if (!success) {
4042        globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4043      }
4044    }
4045
4046    if (result == null) {
4047      globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4048      throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
4049    }
4050
4051    stopwatch.stop();
4052    // TODO(fry): batch by segment
4053    boolean nullsPresent = false;
4054    for (Map.Entry<K, V> entry : result.entrySet()) {
4055      K key = entry.getKey();
4056      V value = entry.getValue();
4057      if (key == null || value == null) {
4058        // delay failure until non-null entries are stored
4059        nullsPresent = true;
4060      } else {
4061        put(key, value);
4062      }
4063    }
4064
4065    if (nullsPresent) {
4066      globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4067      throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
4068    }
4069
4070    // TODO(fry): record count of loaded entries
4071    globalStatsCounter.recordLoadSuccess(stopwatch.elapsed(NANOSECONDS));
4072    return result;
4073  }
4074
4075  /**
4076   * Returns the internal entry for the specified key. The entry may be loading, expired, or
4077   * partially collected.
4078   */
4079  ReferenceEntry<K, V> getEntry(@Nullable Object key) {
4080    // does not impact recency ordering
4081    if (key == null) {
4082      return null;
4083    }
4084    int hash = hash(key);
4085    return segmentFor(hash).getEntry(key, hash);
4086  }
4087
4088  void refresh(K key) {
4089    int hash = hash(checkNotNull(key));
4090    segmentFor(hash).refresh(key, hash, defaultLoader, false);
4091  }
4092
4093  @Override
4094  public boolean containsKey(@Nullable Object key) {
4095    // does not impact recency ordering
4096    if (key == null) {
4097      return false;
4098    }
4099    int hash = hash(key);
4100    return segmentFor(hash).containsKey(key, hash);
4101  }
4102
4103  @Override
4104  public boolean containsValue(@Nullable Object value) {
4105    // does not impact recency ordering
4106    if (value == null) {
4107      return false;
4108    }
4109
4110    // This implementation is patterned after ConcurrentHashMap, but without the locking. The only
4111    // way for it to return a false negative would be for the target value to jump around in the map
4112    // such that none of the subsequent iterations observed it, despite the fact that at every point
4113    // in time it was present somewhere in the map. This becomes increasingly unlikely as
4114    // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible.
4115    long now = ticker.read();
4116    final Segment<K, V>[] segments = this.segments;
4117    long last = -1L;
4118    for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) {
4119      long sum = 0L;
4120      for (Segment<K, V> segment : segments) {
4121        // ensure visibility of most recent completed write
4122        @SuppressWarnings({"UnusedDeclaration", "unused"})
4123        int c = segment.count; // read-volatile
4124
4125        AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table;
4126        for (int j = 0; j < table.length(); j++) {
4127          for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) {
4128            V v = segment.getLiveValue(e, now);
4129            if (v != null && valueEquivalence.equivalent(value, v)) {
4130              return true;
4131            }
4132          }
4133        }
4134        sum += segment.modCount;
4135      }
4136      if (sum == last) {
4137        break;
4138      }
4139      last = sum;
4140    }
4141    return false;
4142  }
4143
4144  @Override
4145  public V put(K key, V value) {
4146    checkNotNull(key);
4147    checkNotNull(value);
4148    int hash = hash(key);
4149    return segmentFor(hash).put(key, hash, value, false);
4150  }
4151
4152  @Override
4153  public V putIfAbsent(K key, V value) {
4154    checkNotNull(key);
4155    checkNotNull(value);
4156    int hash = hash(key);
4157    return segmentFor(hash).put(key, hash, value, true);
4158  }
4159
4160  @Override
4161  public void putAll(Map<? extends K, ? extends V> m) {
4162    for (Entry<? extends K, ? extends V> e : m.entrySet()) {
4163      put(e.getKey(), e.getValue());
4164    }
4165  }
4166
4167  @Override
4168  public V remove(@Nullable Object key) {
4169    if (key == null) {
4170      return null;
4171    }
4172    int hash = hash(key);
4173    return segmentFor(hash).remove(key, hash);
4174  }
4175
4176  @Override
4177  public boolean remove(@Nullable Object key, @Nullable Object value) {
4178    if (key == null || value == null) {
4179      return false;
4180    }
4181    int hash = hash(key);
4182    return segmentFor(hash).remove(key, hash, value);
4183  }
4184
4185  @Override
4186  public boolean replace(K key, @Nullable V oldValue, V newValue) {
4187    checkNotNull(key);
4188    checkNotNull(newValue);
4189    if (oldValue == null) {
4190      return false;
4191    }
4192    int hash = hash(key);
4193    return segmentFor(hash).replace(key, hash, oldValue, newValue);
4194  }
4195
4196  @Override
4197  public V replace(K key, V value) {
4198    checkNotNull(key);
4199    checkNotNull(value);
4200    int hash = hash(key);
4201    return segmentFor(hash).replace(key, hash, value);
4202  }
4203
4204  @Override
4205  public void clear() {
4206    for (Segment<K, V> segment : segments) {
4207      segment.clear();
4208    }
4209  }
4210
4211  void invalidateAll(Iterable<?> keys) {
4212    // TODO(fry): batch by segment
4213    for (Object key : keys) {
4214      remove(key);
4215    }
4216  }
4217
4218  Set<K> keySet;
4219
4220  @Override
4221  public Set<K> keySet() {
4222    // does not impact recency ordering
4223    Set<K> ks = keySet;
4224    return (ks != null) ? ks : (keySet = new KeySet(this));
4225  }
4226
4227  Collection<V> values;
4228
4229  @Override
4230  public Collection<V> values() {
4231    // does not impact recency ordering
4232    Collection<V> vs = values;
4233    return (vs != null) ? vs : (values = new Values(this));
4234  }
4235
4236  Set<Entry<K, V>> entrySet;
4237
4238  @Override
4239  @GwtIncompatible("Not supported.")
4240  public Set<Entry<K, V>> entrySet() {
4241    // does not impact recency ordering
4242    Set<Entry<K, V>> es = entrySet;
4243    return (es != null) ? es : (entrySet = new EntrySet(this));
4244  }
4245
4246  // Iterator Support
4247
4248  abstract class HashIterator<T> implements Iterator<T> {
4249
4250    int nextSegmentIndex;
4251    int nextTableIndex;
4252    Segment<K, V> currentSegment;
4253    AtomicReferenceArray<ReferenceEntry<K, V>> currentTable;
4254    ReferenceEntry<K, V> nextEntry;
4255    WriteThroughEntry nextExternal;
4256    WriteThroughEntry lastReturned;
4257
4258    HashIterator() {
4259      nextSegmentIndex = segments.length - 1;
4260      nextTableIndex = -1;
4261      advance();
4262    }
4263
4264    @Override
4265    public abstract T next();
4266
4267    final void advance() {
4268      nextExternal = null;
4269
4270      if (nextInChain()) {
4271        return;
4272      }
4273
4274      if (nextInTable()) {
4275        return;
4276      }
4277
4278      while (nextSegmentIndex >= 0) {
4279        currentSegment = segments[nextSegmentIndex--];
4280        if (currentSegment.count != 0) {
4281          currentTable = currentSegment.table;
4282          nextTableIndex = currentTable.length() - 1;
4283          if (nextInTable()) {
4284            return;
4285          }
4286        }
4287      }
4288    }
4289
4290    /**
4291     * Finds the next entry in the current chain. Returns true if an entry was found.
4292     */
4293    boolean nextInChain() {
4294      if (nextEntry != null) {
4295        for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) {
4296          if (advanceTo(nextEntry)) {
4297            return true;
4298          }
4299        }
4300      }
4301      return false;
4302    }
4303
4304    /**
4305     * Finds the next entry in the current table. Returns true if an entry was found.
4306     */
4307    boolean nextInTable() {
4308      while (nextTableIndex >= 0) {
4309        if ((nextEntry = currentTable.get(nextTableIndex--)) != null) {
4310          if (advanceTo(nextEntry) || nextInChain()) {
4311            return true;
4312          }
4313        }
4314      }
4315      return false;
4316    }
4317
4318    /**
4319     * Advances to the given entry. Returns true if the entry was valid, false if it should be
4320     * skipped.
4321     */
4322    boolean advanceTo(ReferenceEntry<K, V> entry) {
4323      try {
4324        long now = ticker.read();
4325        K key = entry.getKey();
4326        V value = getLiveValue(entry, now);
4327        if (value != null) {
4328          nextExternal = new WriteThroughEntry(key, value);
4329          return true;
4330        } else {
4331          // Skip stale entry.
4332          return false;
4333        }
4334      } finally {
4335        currentSegment.postReadCleanup();
4336      }
4337    }
4338
4339    @Override
4340    public boolean hasNext() {
4341      return nextExternal != null;
4342    }
4343
4344    WriteThroughEntry nextEntry() {
4345      if (nextExternal == null) {
4346        throw new NoSuchElementException();
4347      }
4348      lastReturned = nextExternal;
4349      advance();
4350      return lastReturned;
4351    }
4352
4353    @Override
4354    public void remove() {
4355      checkState(lastReturned != null);
4356      LocalCache.this.remove(lastReturned.getKey());
4357      lastReturned = null;
4358    }
4359  }
4360
4361  final class KeyIterator extends HashIterator<K> {
4362
4363    @Override
4364    public K next() {
4365      return nextEntry().getKey();
4366    }
4367  }
4368
4369  final class ValueIterator extends HashIterator<V> {
4370
4371    @Override
4372    public V next() {
4373      return nextEntry().getValue();
4374    }
4375  }
4376
4377  /**
4378   * Custom Entry class used by EntryIterator.next(). Note that setValue is not supported by
4379   * this implementation; it throws UnsupportedOperationException instead of writing through.
4380   */
4381  final class WriteThroughEntry implements Entry<K, V> {
4382    final K key; // non-null
4383    V value; // non-null
4384
4385    WriteThroughEntry(K key, V value) {
4386      this.key = key;
4387      this.value = value;
4388    }
4389
4390    @Override
4391    public K getKey() {
4392      return key;
4393    }
4394
4395    @Override
4396    public V getValue() {
4397      return value;
4398    }
4399
4400    @Override
4401    public boolean equals(@Nullable Object object) {
4402      // Cannot use key and value equivalence
4403      if (object instanceof Entry) {
4404        Entry<?, ?> that = (Entry<?, ?>) object;
4405        return key.equals(that.getKey()) && value.equals(that.getValue());
4406      }
4407      return false;
4408    }
4409
4410    @Override
4411    public int hashCode() {
4412      // Cannot use key and value equivalence
4413      return key.hashCode() ^ value.hashCode();
4414    }
4415
4416    @Override
4417    public V setValue(V newValue) {
4418      throw new UnsupportedOperationException();
4419    }
4420
4421    /**
4422     * Returns a string representation of the form <code>{key}={value}</code>.
4423     */
4424    @Override public String toString() {
4425      return getKey() + "=" + getValue();
4426    }
4427  }
4428
4429  final class EntryIterator extends HashIterator<Entry<K, V>> {
4430
4431    @Override
4432    public Entry<K, V> next() {
4433      return nextEntry();
4434    }
4435  }
4436
4437  abstract class AbstractCacheSet<T> extends AbstractSet<T> {
4438    final ConcurrentMap<?, ?> map;
4439
4440    AbstractCacheSet(ConcurrentMap<?, ?> map) {
4441      this.map = map;
4442    }
4443
4444    @Override
4445    public int size() {
4446      return map.size();
4447    }
4448
4449    @Override
4450    public boolean isEmpty() {
4451      return map.isEmpty();
4452    }
4453
4454    @Override
4455    public void clear() {
4456      map.clear();
4457    }
4458  }
4459
4460  final class KeySet extends AbstractCacheSet<K> {
4461
4462    KeySet(ConcurrentMap<?, ?> map) {
4463      super(map);
4464    }
4465
4466    @Override
4467    public Iterator<K> iterator() {
4468      return new KeyIterator();
4469    }
4470
4471    @Override
4472    public boolean contains(Object o) {
4473      return map.containsKey(o);
4474    }
4475
4476    @Override
4477    public boolean remove(Object o) {
4478      return map.remove(o) != null;
4479    }
4480  }
4481
4482  final class Values extends AbstractCollection<V> {
4483    private final ConcurrentMap<?, ?> map;
4484
4485    Values(ConcurrentMap<?, ?> map) {
4486      this.map = map;
4487    }
4488
4489    @Override public int size() {
4490      return map.size();
4491    }
4492
4493    @Override public boolean isEmpty() {
4494      return map.isEmpty();
4495    }
4496
4497    @Override public void clear() {
4498      map.clear();
4499    }
4500
4501    @Override
4502    public Iterator<V> iterator() {
4503      return new ValueIterator();
4504    }
4505
4506    @Override
4507    public boolean contains(Object o) {
4508      return map.containsValue(o);
4509    }
4510  }
4511
4512  final class EntrySet extends AbstractCacheSet<Entry<K, V>> {
4513
4514    EntrySet(ConcurrentMap<?, ?> map) {
4515      super(map);
4516    }
4517
4518    @Override
4519    public Iterator<Entry<K, V>> iterator() {
4520      return new EntryIterator();
4521    }
4522
4523    @Override
4524    public boolean contains(Object o) {
4525      if (!(o instanceof Entry)) {
4526        return false;
4527      }
4528      Entry<?, ?> e = (Entry<?, ?>) o;
4529      Object key = e.getKey();
4530      if (key == null) {
4531        return false;
4532      }
4533      V v = LocalCache.this.get(key);
4534
4535      return v != null && valueEquivalence.equivalent(e.getValue(), v);
4536    }
4537
4538    @Override
4539    public boolean remove(Object o) {
4540      if (!(o instanceof Entry)) {
4541        return false;
4542      }
4543      Entry<?, ?> e = (Entry<?, ?>) o;
4544      Object key = e.getKey();
4545      return key != null && LocalCache.this.remove(key, e.getValue());
4546    }
4547  }
4548
4549  // Serialization Support
4550
4551  /**
4552   * Serializes the configuration of a LocalCache, reconstituting it as a Cache using
4553   * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4554   * of LocalManualCache.
4555   *
4556   * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4557   * proxy must be able to behave as the cache itself.
4558   */
4559  static class ManualSerializationProxy<K, V>
4560      extends ForwardingCache<K, V> implements Serializable {
4561    private static final long serialVersionUID = 1;
4562
4563    final Strength keyStrength;
4564    final Strength valueStrength;
4565    final Equivalence<Object> keyEquivalence;
4566    final Equivalence<Object> valueEquivalence;
4567    final long expireAfterWriteNanos;
4568    final long expireAfterAccessNanos;
4569    final long maxWeight;
4570    final Weigher<K, V> weigher;
4571    final int concurrencyLevel;
4572    final RemovalListener<? super K, ? super V> removalListener;
4573    final Ticker ticker;
4574    final CacheLoader<? super K, V> loader;
4575
4576    transient Cache<K, V> delegate;
4577
4578    ManualSerializationProxy(LocalCache<K, V> cache) {
4579      this(
4580          cache.keyStrength,
4581          cache.valueStrength,
4582          cache.keyEquivalence,
4583          cache.valueEquivalence,
4584          cache.expireAfterWriteNanos,
4585          cache.expireAfterAccessNanos,
4586          cache.maxWeight,
4587          cache.weigher,
4588          cache.concurrencyLevel,
4589          cache.removalListener,
4590          cache.ticker,
4591          cache.defaultLoader);
4592    }
4593
4594    private ManualSerializationProxy(
4595        Strength keyStrength, Strength valueStrength,
4596        Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence,
4597        long expireAfterWriteNanos, long expireAfterAccessNanos, long maxWeight,
4598        Weigher<K, V> weigher, int concurrencyLevel,
4599        RemovalListener<? super K, ? super V> removalListener,
4600        Ticker ticker, CacheLoader<? super K, V> loader) {
4601      this.keyStrength = keyStrength;
4602      this.valueStrength = valueStrength;
4603      this.keyEquivalence = keyEquivalence;
4604      this.valueEquivalence = valueEquivalence;
4605      this.expireAfterWriteNanos = expireAfterWriteNanos;
4606      this.expireAfterAccessNanos = expireAfterAccessNanos;
4607      this.maxWeight = maxWeight;
4608      this.weigher = weigher;
4609      this.concurrencyLevel = concurrencyLevel;
4610      this.removalListener = removalListener;
4611      this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER)
4612          ? null : ticker;
4613      this.loader = loader;
4614    }
4615
4616    CacheBuilder<K, V> recreateCacheBuilder() {
4617      CacheBuilder<K, V> builder = CacheBuilder.newBuilder()
4618          .setKeyStrength(keyStrength)
4619          .setValueStrength(valueStrength)
4620          .keyEquivalence(keyEquivalence)
4621          .valueEquivalence(valueEquivalence)
4622          .concurrencyLevel(concurrencyLevel)
4623          .removalListener(removalListener);
4624      builder.strictParsing = false;
4625      if (expireAfterWriteNanos > 0) {
4626        builder.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS);
4627      }
4628      if (expireAfterAccessNanos > 0) {
4629        builder.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS);
4630      }
4631      if (weigher != OneWeigher.INSTANCE) {
4632        builder.weigher(weigher);
4633        if (maxWeight != UNSET_INT) {
4634          builder.maximumWeight(maxWeight);
4635        }
4636      } else {
4637        if (maxWeight != UNSET_INT) {
4638          builder.maximumSize(maxWeight);
4639        }
4640      }
4641      if (ticker != null) {
4642        builder.ticker(ticker);
4643      }
4644      return builder;
4645    }
4646
4647    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4648      in.defaultReadObject();
4649      CacheBuilder<K, V> builder = recreateCacheBuilder();
4650      this.delegate = builder.build();
4651    }
4652
4653    private Object readResolve() {
4654      return delegate;
4655    }
4656
4657    @Override
4658    protected Cache<K, V> delegate() {
4659      return delegate;
4660    }
4661  }
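
  /*
   * In practice this means serializing a cache built by CacheBuilder preserves its configuration
   * but not its contents: writeReplace hands out one of these proxies, and deserialization
   * rebuilds an empty cache from the recorded settings. A minimal sketch, where roundTrip is a
   * hypothetical helper that writes and re-reads the object with ObjectOutputStream /
   * ObjectInputStream:
   *
   *   Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(100).build();
   *   cache.put("k", "v");
   *   Cache<String, String> copy = roundTrip(cache);
   *   // copy keeps the maximumSize bound, but copy.getIfPresent("k") returns null
   */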
4662
4663  /**
4664   * Serializes the configuration of a LocalCache, reconstituting it as a LoadingCache using
4665   * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4666   * of LocalLoadingCache.
4667   *
4668   * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4669   * proxy must be able to behave as the cache itself.
4670   */
4671  static final class LoadingSerializationProxy<K, V>
4672      extends ManualSerializationProxy<K, V> implements LoadingCache<K, V>, Serializable {
4673    private static final long serialVersionUID = 1;
4674
4675    transient LoadingCache<K, V> autoDelegate;
4676
4677    LoadingSerializationProxy(LocalCache<K, V> cache) {
4678      super(cache);
4679    }
4680
4681    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4682      in.defaultReadObject();
4683      CacheBuilder<K, V> builder = recreateCacheBuilder();
4684      this.autoDelegate = builder.build(loader);
4685    }
4686
4687    @Override
4688    public V get(K key) throws ExecutionException {
4689      return autoDelegate.get(key);
4690    }
4691
4692    @Override
4693    public V getUnchecked(K key) {
4694      return autoDelegate.getUnchecked(key);
4695    }
4696
4697    @Override
4698    public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4699      return autoDelegate.getAll(keys);
4700    }
4701
4702    @Override
4703    public final V apply(K key) {
4704      return autoDelegate.apply(key);
4705    }
4706
4707    @Override
4708    public void refresh(K key) {
4709      autoDelegate.refresh(key);
4710    }
4711
4712    private Object readResolve() {
4713      return autoDelegate;
4714    }
4715  }
4716
4717  static class LocalManualCache<K, V> implements Cache<K, V>, Serializable {
4718    final LocalCache<K, V> localCache;
4719
4720    LocalManualCache(CacheBuilder<? super K, ? super V> builder) {
4721      this(new LocalCache<K, V>(builder, null));
4722    }
4723
4724    private LocalManualCache(LocalCache<K, V> localCache) {
4725      this.localCache = localCache;
4726    }
4727
4728    // Cache methods
4729
4730    @Override
4731    @Nullable
4732    public V getIfPresent(Object key) {
4733      return localCache.getIfPresent(key);
4734    }
4735
4736    @Override
4737    public V get(K key, final Callable<? extends V> valueLoader) throws ExecutionException {
4738      checkNotNull(valueLoader);
4739      return localCache.get(key, new CacheLoader<Object, V>() {
4740        @Override
4741        public V load(Object key) throws Exception {
4742          return valueLoader.call();
4743        }
4744      });
4745    }
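
    /*
     * This is the "compute if absent" entry point of the manual Cache API: the Callable runs only
     * on a miss, and concurrent callers for the same key wait for a single load rather than
     * loading in parallel. A minimal usage sketch; loadValueSomehow is a hypothetical helper:
     *
     *   String value = cache.get("key", new Callable<String>() {
     *     @Override public String call() throws Exception {
     *       return loadValueSomehow("key"); // invoked only if "key" is absent
     *     }
     *   });
     */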
4746
4747    @Override
4748    public ImmutableMap<K, V> getAllPresent(Iterable<?> keys) {
4749      return localCache.getAllPresent(keys);
4750    }
4751
4752    @Override
4753    public void put(K key, V value) {
4754      localCache.put(key, value);
4755    }
4756
4757    @Override
4758    public void putAll(Map<? extends K, ? extends V> m) {
4759      localCache.putAll(m);
4760    }
4761
4762    @Override
4763    public void invalidate(Object key) {
4764      checkNotNull(key);
4765      localCache.remove(key);
4766    }
4767
4768    @Override
4769    public void invalidateAll(Iterable<?> keys) {
4770      localCache.invalidateAll(keys);
4771    }
4772
4773    @Override
4774    public void invalidateAll() {
4775      localCache.clear();
4776    }
4777
4778    @Override
4779    public long size() {
4780      return localCache.longSize();
4781    }
4782
4783    @Override
4784    public ConcurrentMap<K, V> asMap() {
4785      return localCache;
4786    }
4787
4788    @Override
4789    public CacheStats stats() {
4790      SimpleStatsCounter aggregator = new SimpleStatsCounter();
4791      aggregator.incrementBy(localCache.globalStatsCounter);
4792      for (Segment<K, V> segment : localCache.segments) {
4793        aggregator.incrementBy(segment.statsCounter);
4794      }
4795      return aggregator.snapshot();
4796    }
4797
4798    @Override
4799    public void cleanUp() {
4800      localCache.cleanUp();
4801    }
4802
4803    // Serialization Support
4804
4805    private static final long serialVersionUID = 1;
4806
4807    Object writeReplace() {
4808      return new ManualSerializationProxy<K, V>(localCache);
4809    }
4810  }
4811
4812  static class LocalLoadingCache<K, V>
4813      extends LocalManualCache<K, V> implements LoadingCache<K, V> {
4814
4815    LocalLoadingCache(CacheBuilder<? super K, ? super V> builder,
4816        CacheLoader<? super K, V> loader) {
4817      super(new LocalCache<K, V>(builder, checkNotNull(loader)));
4818    }
4819
4820    // LoadingCache methods
4821
4822    @Override
4823    public V get(K key) throws ExecutionException {
4824      return localCache.getOrLoad(key);
4825    }
4826
4827    @Override
4828    public V getUnchecked(K key) {
4829      try {
4830        return get(key);
4831      } catch (ExecutionException e) {
4832        throw new UncheckedExecutionException(e.getCause());
4833      }
4834    }
4835
4836    @Override
4837    public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4838      return localCache.getAll(keys);
4839    }
4840
4841    @Override
4842    public void refresh(K key) {
4843      localCache.refresh(key);
4844    }
4845
4846    @Override
4847    public final V apply(K key) {
4848      return getUnchecked(key);
4849    }
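
    /*
     * This loading variant is what CacheBuilder.build(CacheLoader) returns: get and getUnchecked
     * fall through to the configured loader on a miss, and refresh reloads the value
     * (asynchronously if the loader overrides reload) while readers keep seeing the old value. A
     * minimal sketch; expensiveLookup is a hypothetical helper:
     *
     *   LoadingCache<String, String> cache = CacheBuilder.newBuilder()
     *       .refreshAfterWrite(10, TimeUnit.MINUTES)
     *       .build(new CacheLoader<String, String>() {
     *         @Override public String load(String key) throws Exception {
     *           return expensiveLookup(key);
     *         }
     *       });
     *
     *   String v = cache.getUnchecked("k"); // loads on first access
     *   cache.refresh("k");                 // reloads; the current value stays visible meanwhile
     */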
4850
4851    // Serialization Support
4852
4853    private static final long serialVersionUID = 1;
4854
4855    @Override
4856    Object writeReplace() {
4857      return new LoadingSerializationProxy<K, V>(localCache);
4858    }
4859  }
4860}
4861