@@ -12,6 +12,7 @@ import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
+import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
@@ -218,7 +219,7 @@ import java.util.concurrent.locks.ReentrantLock;
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
*/
-public class ConcurrentHashMapV8<K,V>
+public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
implements ConcurrentMap<K,V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;

@@ -275,6 +276,7 @@ public class ConcurrentHashMapV8<K,V>
/** Interface describing a function mapping two ints to an int */
public interface IntByIntToInt { int apply(int a, int b); }

+
/*
* Overview:
*
@@ -380,14 +382,15 @@ public class ConcurrentHashMapV8<K,V>
* The table is resized when occupancy exceeds a percentage
* threshold (nominally, 0.75, but see below). Any thread
* noticing an overfull bin may assist in resizing after the
- * initiating thread allocates and sets up the replacement
- * array. However, rather than stalling, these other threads may
- * proceed with insertions etc. The use of TreeBins shields us
- * from the worst case effects of overfilling while resizes are in
+ * initiating thread allocates and sets up the replacement array.
+ * However, rather than stalling, these other threads may proceed
+ * with insertions etc. The use of TreeBins shields us from the
+ * worst case effects of overfilling while resizes are in
* progress. Resizing proceeds by transferring bins, one by one,
- * from the table to the next table. To enable concurrency, the
- * next table must be (incrementally) prefilled with place-holders
- * serving as reverse forwarders to the old table. Because we are
+ * from the table to the next table. However, threads claim small
+ * blocks of indices to transfer (via field transferIndex) before
+ * doing so, reducing contention. A generation stamp in field
+ * sizeCtl ensures that resizings do not overlap. Because we are
* using power-of-two expansion, the elements from each bin must
* either stay at same index, or move with a power of two
* offset. We eliminate unnecessary node creation by catching
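
As a side note on the block-claiming scheme described in the comment above, the following stand-alone sketch (not part of the patch; the class and method names are invented for illustration) shows how cooperating threads can CAS a shared transferIndex downward by a fixed stride so that each claims a disjoint range of bins:

    import java.util.concurrent.atomic.AtomicInteger;

    // Minimal sketch of stride-based work claiming, loosely modeled on the
    // transferIndex protocol described above; not ConcurrentHashMapV8 itself.
    class TransferSketch {
        static final int STRIDE = 16;                  // plays the role of MIN_TRANSFER_STRIDE
        final AtomicInteger transferIndex;             // next unclaimed bin + 1

        TransferSketch(int tableLength) {
            transferIndex = new AtomicInteger(tableLength);
        }

        // Each participating thread repeatedly claims [nextBound, nextIndex) and processes it.
        void helpTransfer() {
            for (;;) {
                int nextIndex = transferIndex.get();
                if (nextIndex <= 0)
                    return;                            // nothing left to claim
                int nextBound = Math.max(nextIndex - STRIDE, 0);
                if (transferIndex.compareAndSet(nextIndex, nextBound)) {
                    for (int i = nextIndex - 1; i >= nextBound; --i)
                        processBin(i);                 // move bin i to the new table
                }
            }
        }

        void processBin(int i) { /* transfer one bin (omitted) */ }
    }
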
@@ -408,13 +411,19 @@ public class ConcurrentHashMapV8<K,V>
* locks, average aggregate waits become shorter as resizing
* progresses. The transfer operation must also ensure that all
* accessible bins in both the old and new table are usable by any
- * traversal. This is arranged by proceeding from the last bin
- * (table.length - 1) up towards the first. Upon seeing a
- * forwarding node, traversals (see class Traverser) arrange to
- * move to the new table without revisiting nodes. However, to
- * ensure that no intervening nodes are skipped, bin splitting can
- * only begin after the associated reverse-forwarders are in
- * place.
+ * traversal. This is arranged in part by proceeding from the
+ * last bin (table.length - 1) up towards the first. Upon seeing
+ * a forwarding node, traversals (see class Traverser) arrange to
+ * move to the new table without revisiting nodes. To ensure that
+ * no intervening nodes are skipped even when moved out of order,
+ * a stack (see class TableStack) is created on first encounter of
+ * a forwarding node during a traversal, to maintain its place if
+ * later processing the current table. The need for these
+ * save/restore mechanics is relatively rare, but when one
+ * forwarding node is encountered, typically many more will be.
+ * So Traversers use a simple caching scheme to avoid creating so
+ * many new TableStack nodes. (Thanks to Peter Levart for
+ * suggesting use of a stack here.)
*
* The traversal scheme also applies to partial traversals of
* ranges of bins (via an alternate Traverser constructor)
@@ -446,16 +455,18 @@ public class ConcurrentHashMapV8<K,V>
* related operations (which is the main reason we cannot use
* existing collections such as TreeMaps). TreeBins contain
* Comparable elements, but may contain others, as well as
- * elements that are Comparable but not necessarily Comparable
- * for the same T, so we cannot invoke compareTo among them. To
- * handle this, the tree is ordered primarily by hash value, then
- * by Comparable.compareTo order if applicable. On lookup at a
- * node, if elements are not comparable or compare as 0 then both
- * left and right children may need to be searched in the case of
- * tied hash values. (This corresponds to the full list search
- * that would be necessary if all elements were non-Comparable and
- * had tied hashes.) The red-black balancing code is updated from
- * pre-jdk-collections
+ * elements that are Comparable but not necessarily Comparable for
+ * the same T, so we cannot invoke compareTo among them. To handle
+ * this, the tree is ordered primarily by hash value, then by
+ * Comparable.compareTo order if applicable. On lookup at a node,
+ * if elements are not comparable or compare as 0 then both left
+ * and right children may need to be searched in the case of tied
+ * hash values. (This corresponds to the full list search that
+ * would be necessary if all elements were non-Comparable and had
+ * tied hashes.) On insertion, to keep a total ordering (or as
+ * close as is required here) across rebalancings, we compare
+ * classes and identityHashCodes as tie-breakers. The red-black
+ * balancing code is updated from pre-jdk-collections
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
* based in turn on Cormen, Leiserson, and Rivest "Introduction to
* Algorithms" (CLR).
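
The ordering rule described above can be summarized outside the patch as a small comparison routine. The sketch below is a simplification (it keys the Comparable check on exact class equality rather than the comparableClassFor machinery, and assumes non-null keys), and the class name is invented for illustration:

    // Illustrative only: a simplified version of the TreeBin ordering rule
    // (hash, then compareTo when applicable, then tie-breakers).
    final class BinOrderSketch {
        @SuppressWarnings({"rawtypes", "unchecked"})
        static int order(int hashA, Object a, int hashB, Object b) {
            if (hashA != hashB)
                return hashA < hashB ? -1 : 1;          // primary: hash value
            if (a instanceof Comparable && a.getClass() == b.getClass()) {
                int d = ((Comparable) a).compareTo(b);  // secondary: compareTo
                if (d != 0)
                    return d;
            }
            int d = a.getClass().getName()              // tie-break: class name,
                .compareTo(b.getClass().getName());     // then identity hash
            if (d != 0)
                return d;
            return System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1;
        }
    }
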
@@ -485,6 +496,10 @@ public class ConcurrentHashMapV8<K,V>
* unused "Segment" class that is instantiated in minimal form
* only when serializing.
*
+ * Also, solely for compatibility with previous versions of this
+ * class, it extends AbstractMap, even though all of its methods
+ * are overridden, so it is just useless baggage.
+ *
* This file is organized to make things a little easier to follow
* while reading than they might otherwise: First the main static
* declarations and utilities, then fields, then main public
@@ -565,6 +580,23 @@ public class ConcurrentHashMapV8<K,V>
*/
private static final int MIN_TRANSFER_STRIDE = 16;

+ /**
+ * The number of bits used for generation stamp in sizeCtl.
+ * Must be at least 6 for 32bit arrays.
+ */
+ private static int RESIZE_STAMP_BITS = 16;
+
+ /**
+ * The maximum number of threads that can help resize.
+ * Must fit in 32 - RESIZE_STAMP_BITS bits.
+ */
+ private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
+
+ /**
+ * The bit shift for recording size stamp in sizeCtl.
+ */
+ private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
+
/*
* Encodings for Node hash fields. See above for explanation.
*/
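
A brief arithmetic sketch (again, not part of the patch; the demo class is hypothetical) of how these constants are used: for a table of length 16, resizeStamp produces 0x801b, and shifting it left by RESIZE_STAMP_SHIFT yields a negative sizeCtl whose low 16 bits count the resizing threads plus one.

    // Demonstrates the stamp packing; constants mirror the fields added above.
    public class ResizeStampDemo {
        static final int RESIZE_STAMP_BITS = 16;
        static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

        static int resizeStamp(int n) {
            return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
        }

        public static void main(String[] args) {
            int n = 16;                                          // current table length
            int rs = resizeStamp(n);                             // 27 | 0x8000 = 0x801b
            int sc = (rs << RESIZE_STAMP_SHIFT) + 2;             // first resizer's sizeCtl
            System.out.println(Integer.toHexString(rs));         // 801b
            System.out.println(sc < 0);                          // true: stamped sizeCtl is negative
            System.out.println((sc >>> RESIZE_STAMP_SHIFT) == rs); // true: stamp recoverable
            System.out.println((sc & 0xffff) - 1);               // 1 thread currently resizing
        }
    }
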
@@ -722,7 +754,7 @@ public class ConcurrentHashMapV8<K,V>
* errors by users, these checks must operate on local variables,
* which accounts for some odd-looking inline assignments below.
* Note that calls to setTabAt always occur within locked regions,
- * and so in principle require only release ordering, not need
+ * and so in principle require only release ordering, not
* full volatile semantics, but are currently coded as volatile
* writes to be conservative.
*/
@@ -776,11 +808,6 @@ public class ConcurrentHashMapV8<K,V>
*/
private transient volatile int transferIndex;

- /**
- * The least available table index to split while resizing.
- */
- private transient volatile int transferOrigin;
-
/**
* Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
*/
@@ -1359,6 +1386,7 @@ public class ConcurrentHashMapV8<K,V>
* Saves the state of the {@code ConcurrentHashMapV8} instance to a
* stream (i.e., serializes it).
* @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
* @serialData
* the key (Object) and value (Object)
* for each key-value mapping, followed by a null pair.
@@ -1401,6 +1429,9 @@ public class ConcurrentHashMapV8<K,V>
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
@@ -1435,8 +1466,8 @@ public class ConcurrentHashMapV8<K,V>
int sz = (int)size;
n = tableSizeFor(sz + (sz >>> 1) + 1);
}
- @SuppressWarnings({"rawtypes","unchecked"})
- Node<K,V>[] tab = (Node<K,V>[])new Node[n];
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
int mask = n - 1;
long added = 0L;
while (p != null) {
@@ -2101,9 +2132,9 @@ public class ConcurrentHashMapV8<K,V>
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
+ * @return the new set
* @throws IllegalArgumentException if the initial capacity of
* elements is negative
- * @return the new set
* @since 1.8
*/
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
@@ -2182,6 +2213,14 @@ public class ConcurrentHashMapV8<K,V>

/* ---------------- Table Initialization and Resizing -------------- */

+ /**
+ * Returns the stamp bits for resizing a table of size n.
+ * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
+ */
+ static final int resizeStamp(int n) {
+ return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
+ }
+
/**
* Initializes table, using the size recorded in sizeCtl.
*/
@@ -2194,8 +2233,8 @@ public class ConcurrentHashMapV8<K,V>
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
- @SuppressWarnings({"rawtypes","unchecked"})
- Node<K,V>[] nt = (Node<K,V>[])new Node[n];
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
@@ -2237,17 +2276,20 @@ public class ConcurrentHashMapV8<K,V>
s = sumCount();
}
if (check >= 0) {
- Node<K,V>[] tab, nt; int sc;
+ Node<K,V>[] tab, nt; int n, sc;
while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
- tab.length < MAXIMUM_CAPACITY) {
+ (n = tab.length) < MAXIMUM_CAPACITY) {
+ int rs = resizeStamp(n);
if (sc < 0) {
- if (sc == -1 || transferIndex <= transferOrigin ||
- (nt = nextTable) == null)
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
break;
- if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
- else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
s = sumCount();
}
@@ -2259,12 +2301,19 @@ public class ConcurrentHashMapV8<K,V>
*/
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
Node<K,V>[] nextTab; int sc;
- if ((f instanceof ForwardingNode) &&
+ if (tab != null && (f instanceof ForwardingNode) &&
(nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
- if (nextTab == nextTable && tab == table &&
- transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
- U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
- transfer(tab, nextTab);
+ int rs = resizeStamp(tab.length);
+ while (nextTab == nextTable && table == tab &&
+ (sc = sizeCtl) < 0) {
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
+ transfer(tab, nextTab);
+ break;
+ }
+ }
return nextTab;
}
return table;
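
The participant accounting that addCount, helpTransfer, and transfer perform on sizeCtl can be modeled in isolation. The class below is a rough, hypothetical model rather than the real implementation: the first resizer installs the stamp plus 2, each helper CASes the count up by one, and every thread counts itself back out, with the last one recognizing that fact from the stamp:

    import java.util.concurrent.atomic.AtomicInteger;

    // Rough model of the sizeCtl resize-participation protocol described above.
    class SizeCtlModel {
        static final int SHIFT = 16;                     // like RESIZE_STAMP_SHIFT
        final AtomicInteger sizeCtl = new AtomicInteger();

        boolean startResize(int stamp) {                 // initiating thread
            return sizeCtl.compareAndSet(0, (stamp << SHIFT) + 2);
        }

        boolean join(int stamp) {                        // helping thread
            int sc = sizeCtl.get();
            return (sc >>> SHIFT) == stamp               // same resize generation
                && sizeCtl.compareAndSet(sc, sc + 1);
        }

        boolean leaveAndCheckLast(int stamp) {           // when a thread runs out of strides
            int sc = sizeCtl.getAndAdd(-1);              // sc is the value before decrement
            return (sc - 2) == (stamp << SHIFT);         // true only for the last leaver
        }
    }
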
@@ -2286,8 +2335,8 @@ public class ConcurrentHashMapV8<K,V>
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if (table == tab) {
- @SuppressWarnings({"rawtypes","unchecked"})
- Node<K,V>[] nt = (Node<K,V>[])new Node[n];
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
sc = n - (n >>> 2);
}
@@ -2298,9 +2347,21 @@ public class ConcurrentHashMapV8<K,V>
}
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
- else if (tab == table &&
- U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
+ else if (tab == table) {
+ int rs = resizeStamp(n);
+ if (sc < 0) {
+ Node<K,V>[] nt;
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
+ transfer(tab, null);
+ }
}
}

@@ -2314,36 +2375,27 @@ public class ConcurrentHashMapV8<K,V>
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
try {
- @SuppressWarnings({"rawtypes","unchecked"})
- Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
- transferOrigin = n;
transferIndex = n;
- ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
- for (int k = n; k > 0;) { // progressively reveal ready slots
- int nextk = (k > stride) ? k - stride : 0;
- for (int m = nextk; m < k; ++m)
- nextTab[m] = rev;
- for (int m = n + nextk; m < n + k; ++m)
- nextTab[m] = rev;
- U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
- }
}
int nextn = nextTab.length;
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
- int nextIndex, nextBound, fh; Node<K,V> f;
+ Node<K,V> f; int fh;
while (advance) {
+ int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
- else if ((nextIndex = transferIndex) <= transferOrigin) {
+ else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
@@ -2357,29 +2409,22 @@ public class ConcurrentHashMapV8<K,V>
}
}
if (i < 0 || i >= n || i + n >= nextn) {
+ int sc;
if (finishing) {
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
- for (int sc;;) {
- if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
- if (sc != -1)
- return;
- finishing = advance = true;
- i = n; // recheck before commit
- break;
- }
- }
- }
- else if ((f = tabAt(tab, i)) == null) {
- if (casTabAt(tab, i, null, fwd)) {
- setTabAt(nextTab, i, null);
- setTabAt(nextTab, i + n, null);
- advance = true;
+ if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
+ if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
+ return;
+ finishing = advance = true;
+ i = n; // recheck before commit
}
}
+ else if ((f = tabAt(tab, i)) == null)
+ advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
else {
@@ -2466,11 +2511,8 @@ public class ConcurrentHashMapV8<K,V>
private final void treeifyBin(Node<K,V>[] tab, int index) {
Node<K,V> b; int n, sc;
if (tab != null) {
- if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
- if (tab == table && (sc = sizeCtl) >= 0 &&
- U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
- }
+ if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
+ tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) {
if (tabAt(tab, index) == b) {
@@ -2546,19 +2588,18 @@ public class ConcurrentHashMapV8<K,V>
p = pr;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
- else if (pl == null && pr == null)
- break;
+ else if (pl == null)
+ p = pr;
+ else if (pr == null)
+ p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
- else if (pl == null)
- p = pr;
- else if (pr == null ||
- (q = pr.findTreeNode(h, k, kc)) == null)
- p = pl;
- else
+ else if ((q = pr.findTreeNode(h, k, kc)) != null)
return q;
+ else
+ p = pl;
} while (p != null);
}
return null;
@@ -2584,6 +2625,23 @@ public class ConcurrentHashMapV8<K,V>
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock

+ /**
+ * Tie-breaking utility for ordering insertions when equal
+ * hashCodes and non-comparable. We don't require a total
+ * order, just a consistent insertion rule to maintain
+ * equivalence across rebalancings. Tie-breaking further than
+ * necessary simplifies testing a bit.
+ */
+ static int tieBreakOrder(Object a, Object b) {
+ int d;
+ if (a == null || b == null ||
+ (d = a.getClass().getName().
+ compareTo(b.getClass().getName())) == 0)
+ d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
+ -1 : 1);
+ return d;
+ }
+
/**
* Creates bin with initial set of nodes headed by b.
*/
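
For reference, a quick stand-alone demonstration of the rule implemented by tieBreakOrder (the demo class is hypothetical; the method body is copied from the patch above). Keys of different classes order by class name, and keys of the same class fall back to identityHashCode, so the result is consistent within a run even though it is not a true total order.

    public class TieBreakDemo {
        static int tieBreakOrder(Object a, Object b) {   // copy of the new utility
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        public static void main(String[] args) {
            Object x = new Object(), y = new Object();   // same class, not Comparable
            System.out.println(tieBreakOrder(x, y));     // -1 or 1, but...
            System.out.println(tieBreakOrder(x, y) == tieBreakOrder(x, y)); // ...stable
            System.out.println(tieBreakOrder("a", x));   // String vs Object: by class name
        }
    }
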
@@ -2600,21 +2658,21 @@ public class ConcurrentHashMapV8<K,V>
r = x;
}
else {
- Object key = x.key;
- int hash = x.hash;
+ K k = x.key;
+ int h = x.hash;
Class<?> kc = null;
for (TreeNode<K,V> p = r;;) {
int dir, ph;
- if ((ph = p.hash) > hash)
+ K pk = p.key;
+ if ((ph = p.hash) > h)
dir = -1;
- else if (ph < hash)
+ else if (ph < h)
dir = 1;
- else if ((kc != null ||
- (kc = comparableClassFor(key)) != null))
- dir = compareComparables(kc, key, p.key);
- else
- dir = 0;
- TreeNode<K,V> xp = p;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0)
+ dir = tieBreakOrder(k, pk);
+ TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
@@ -2628,6 +2686,7 @@ public class ConcurrentHashMapV8<K,V>
}
}
this.root = r;
+ assert checkInvariants(root);
}

/**
@@ -2651,14 +2710,14 @@ public class ConcurrentHashMapV8<K,V>
private final void contendedLock() {
boolean waiting = false;
for (int s;;) {
- if (((s = lockState) & WRITER) == 0) {
+ if (((s = lockState) & ~WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null;
return;
}
}
- else if ((s | WAITER) == 0) {
+ else if ((s & WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
@@ -2676,12 +2735,13 @@ public class ConcurrentHashMapV8<K,V>
*/
final Node<K,V> find(int h, Object k) {
if (k != null) {
- for (Node<K,V> e = first; e != null; e = e.next) {
+ for (Node<K,V> e = first; e != null; ) {
int s; K ek;
if (((s = lockState) & (WAITER|WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
+ e = e.next;
}
else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
@@ -2711,8 +2771,9 @@ public class ConcurrentHashMapV8<K,V>
*/
final TreeNode<K,V> putTreeVal(int h, K k, V v) {
Class<?> kc = null;
+ boolean searched = false;
for (TreeNode<K,V> p = root;;) {
- int dir, ph; K pk; TreeNode<K,V> q, pr;
+ int dir, ph; K pk;
if (p == null) {
first = root = new TreeNode<K,V>(h, k, v, null, null);
break;
@@ -2726,21 +2787,25 @@ public class ConcurrentHashMapV8<K,V>
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
- if (p.left == null)
- dir = 1;
- else if ((pr = p.right) == null ||
- (q = pr.findTreeNode(h, k, kc)) == null)
- dir = -1;
- else
- return q;
+ if (!searched) {
+ TreeNode<K,V> q, ch;
+ searched = true;
+ if (((ch = p.left) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null) ||
+ ((ch = p.right) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null))
+ return q;
+ }
+ dir = tieBreakOrder(k, pk);
}
+
TreeNode<K,V> xp = p;
- if ((p = (dir < 0) ? p.left : p.right) == null) {
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
TreeNode<K,V> x, f = first;
first = x = new TreeNode<K,V>(h, k, v, f, xp);
if (f != null)
f.prev = x;
- if (dir < 0)
+ if (dir <= 0)
xp.left = x;
else
xp.right = x;
@@ -3094,6 +3159,18 @@ public class ConcurrentHashMapV8<K,V>

/* ----------------Table Traversal -------------- */

+ /**
+ * Records the table, its length, and current traversal index for a
+ * traverser that must process a region of a forwarded table before
+ * proceeding with current table.
+ */
+ static final class TableStack<K,V> {
+ int length;
+ int index;
+ Node<K,V>[] tab;
+ TableStack<K,V> next;
+ }
+
/**
* Encapsulates traversal for methods such as containsValue; also
* serves as a base class for other iterators and spliterators.
@@ -3118,6 +3195,7 @@ public class ConcurrentHashMapV8<K,V>
static class Traverser<K,V> {
Node<K,V>[] tab; // current table; updated if resized
Node<K,V> next; // the next entry to use
+ TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
int index; // index of bin to use next
int baseIndex; // current index of initial table
int baseLimit; // index bound for initial table
@@ -3139,16 +3217,17 @@ public class ConcurrentHashMapV8<K,V>
if ((e = next) != null)
e = e.next;
for (;;) {
- Node<K,V>[] t; int i, n; K ek; // must use locals in checks
+ Node<K,V>[] t; int i, n; // must use locals in checks
if (e != null)
return next = e;
if (baseIndex >= baseLimit || (t = tab) == null ||
(n = t.length) <= (i = index) || i < 0)
return next = null;
- if ((e = tabAt(t, index)) != null && e.hash < 0) {
+ if ((e = tabAt(t, i)) != null && e.hash < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
e = null;
+ pushState(t, i, n);
continue;
}
else if (e instanceof TreeBin)
@@ -3156,9 +3235,48 @@ public class ConcurrentHashMapV8<K,V>
else
e = null;
}
- if ((index += baseSize) >= n)
- index = ++baseIndex; // visit upper slots if present
+ if (stack != null)
+ recoverState(n);
+ else if ((index = i + baseSize) >= n)
+ index = ++baseIndex; // visit upper slots if present
+ }
+ }
+
+ /**
+ * Saves traversal state upon encountering a forwarding node.
+ */
+ private void pushState(Node<K,V>[] t, int i, int n) {
+ TableStack<K,V> s = spare; // reuse if possible
+ if (s != null)
+ spare = s.next;
+ else
+ s = new TableStack<K,V>();
+ s.tab = t;
+ s.length = n;
+ s.index = i;
+ s.next = stack;
+ stack = s;
+ }
+
+ /**
+ * Possibly pops traversal state.
+ *
+ * @param n length of current table
+ */
+ private void recoverState(int n) {
+ TableStack<K,V> s; int len;
+ while ((s = stack) != null && (index += (len = s.length)) >= n) {
+ n = len;
+ index = s.index;
+ tab = s.tab;
+ s.tab = null;
+ TableStack<K,V> next = s.next;
+ s.next = spare; // save for reuse
+ stack = next;
+ spare = s;
}
+ if (s == null && (index += baseSize) >= n)
+ index = ++baseIndex;
}
}

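
The TableStack save/restore and spare-node caching used by Traverser can also be shown in isolation. The sketch below is hypothetical and generic, but follows the same pattern: pushes reuse previously popped frames, so a traversal that crosses many forwarding nodes allocates only a handful of objects.

    // Stand-alone illustration (not part of the patch) of the save/restore scheme.
    class FrameStackSketch {
        static final class Frame { int index, length; Frame next; }

        Frame stack, spare;

        void push(int index, int length) {
            Frame f = spare;              // reuse a previously popped frame...
            if (f != null)
                spare = f.next;
            else
                f = new Frame();          // ...or allocate on first use
            f.index = index;
            f.length = length;
            f.next = stack;
            stack = f;
        }

        Frame pop() {
            Frame f = stack;
            if (f != null) {
                stack = f.next;
                f.next = spare;           // park the frame for reuse
                spare = f;
            }
            return f;
        }
    }
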
@@ -6126,7 +6244,6 @@ public class ConcurrentHashMapV8<K,V>
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
- private static final long TRANSFERORIGIN;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
@@ -6141,8 +6258,6 @@ public class ConcurrentHashMapV8<K,V>
(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset
(k.getDeclaredField("transferIndex"));
- TRANSFERORIGIN = U.objectFieldOffset
- (k.getDeclaredField("transferOrigin"));
BASECOUNT = U.objectFieldOffset
(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset
|