|
|||||||||||||||||||
| Source file | Conditionals | Statements | Methods | TOTAL | |||||||||||||||
| ConcurrentReaderHashMap.java | 27,8% | 29,7% | 17,5% | 27,6% |
|
||||||||||||||
| 1 | /* | |
| 2 | File: ConcurrentReaderHashMap | |
| 3 | ||
| 4 | Written by Doug Lea. Adapted and released, under explicit | |
| 5 | permission, from JDK1.2 HashMap.java and Hashtable.java which | |
| 6 | carries the following copyright: | |
| 7 | ||
| 8 | * Copyright 1997 by Sun Microsystems, Inc., | |
| 9 | * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A. | |
| 10 | * All rights reserved. | |
| 11 | * | |
| 12 | * This software is the confidential and proprietary information | |
| 13 | * of Sun Microsystems, Inc. ("Confidential Information"). You | |
| 14 | * shall not disclose such Confidential Information and shall use | |
| 15 | * it only in accordance with the terms of the license agreement | |
| 16 | * you entered into with Sun. | |
| 17 | ||
| 18 | History: | |
| 19 | Date Who What | |
| 20 | 28oct1999 dl Created | |
| 21 | 14dec1999 dl jmm snapshot | |
| 22 | 19apr2000 dl use barrierLock | |
| 23 | 12jan2001 dl public release | |
| 24 | 17nov2001 dl Minor tunings | |
| 25 | 20may2002 dl BarrierLock can now be serialized. | |
| 26 | 09dec2002 dl Fix interference checks. | |
| 27 | */ | |
| 28 | ||
| 29 | package org.dom4j.tree; | |
| 30 | ||
| 31 | import java.io.IOException; | |
| 32 | import java.io.Serializable; | |
| 33 | import java.util.AbstractCollection; | |
| 34 | import java.util.AbstractMap; | |
| 35 | import java.util.AbstractSet; | |
| 36 | import java.util.Collection; | |
| 37 | import java.util.Enumeration; | |
| 38 | import java.util.Iterator; | |
| 39 | import java.util.Map; | |
| 40 | import java.util.NoSuchElementException; | |
| 41 | import java.util.Set; | |
| 42 | ||
| 43 | /** | |
| 44 | * A version of Hashtable that supports mostly-concurrent reading, but exclusive | |
| 45 | * writing. Because reads are not limited to periods without writes, a | |
| 46 | * concurrent reader policy is weaker than a classic reader/writer policy, but | |
| 47 | * is generally faster and allows more concurrency. This class is a good choice | |
| 48 | * especially for tables that are mainly created by one thread during the | |
| 49 | * start-up phase of a program, and from then on, are mainly read (with perhaps | |
| 50 | * occasional additions or removals) in many threads. If you also need | |
| 51 | * concurrency among writes, consider instead using ConcurrentHashMap. | |
| 52 | * <p> | |
| 53 | * | |
| 54 | * Successful retrievals using get(key) and containsKey(key) usually run without | |
| 55 | * locking. Unsuccessful ones (i.e., when the key is not present) do involve | |
| 56 | * brief synchronization (locking). Also, the size and isEmpty methods are | |
| 57 | * always synchronized. | |
| 58 | * | |
| 59 | * <p> | |
| 60 | * Because retrieval operations can ordinarily overlap with writing operations | |
| 61 | * (i.e., put, remove, and their derivatives), retrievals can only be guaranteed | |
| 62 | * to return the results of the most recently <em>completed</em> operations | |
| 63 | * holding upon their onset. Retrieval operations may or may not return results | |
| 64 | * reflecting in-progress writing operations. However, the retrieval operations | |
| 65 | * do always return consistent results -- either those holding before any single | |
| 66 | * modification or after it, but never a nonsense result. For aggregate | |
| 67 | * operations such as putAll and clear, concurrent reads may reflect insertion | |
| 68 | * or removal of only some entries. In those rare contexts in which you use a | |
| 69 | * hash table to synchronize operations across threads (for example, to prevent | |
| 70 | * reads until after clears), you should either encase operations in | |
| 71 | * synchronized blocks, or instead use java.util.Hashtable. | |
| 72 | * | |
| 73 | * <p> | |
| 74 | * | |
| 75 | * This class also supports optional guaranteed exclusive reads, simply by | |
| 76 | * surrounding a call within a synchronized block, as in <br> | |
| 77 | * <code>ConcurrentReaderHashMap t; ... Object v; <br> | |
| 78 | * synchronized(t) { v = t.get(k); } </code><br> | |
| 79 | * | |
| 80 | * But this is not usually necessary in practice. For example, it is generally | |
| 81 | * inefficient to write: | |
| 82 | * | |
| 83 | * <pre> | |
| 84 | * | |
| 85 | * | |
| 86 | * ConcurrentReaderHashMap t; ... // Inefficient version | |
| 87 | * Object key; ... | |
| 88 | * Object value; ... | |
| 89 | * synchronized(t) { | |
| 90 | * if (!t.containsKey(key)) | |
| 91 | * t.put(key, value); | |
| 92 | * // other code if not previously present | |
| 93 | * } | |
| 94 | * else { | |
| 95 | * // other code if it was previously present | |
| 96 | * } | |
| 97 | * } | |
| 98 | * | |
| 99 | * | |
| 100 | * </pre> | |
| 101 | * | |
| 102 | * Instead, if the values are intended to be the same in each case, just take | |
| 103 | * advantage of the fact that put returns null if the key was not previously | |
| 104 | * present: | |
| 105 | * | |
| 106 | * <pre> | |
| 107 | * | |
| 108 | * | |
| 109 | * ConcurrentReaderHashMap t; ... // Use this instead | |
| 110 | * Object key; ... | |
| 111 | * Object value; ... | |
| 112 | * Object oldValue = t.put(key, value); | |
| 113 | * if (oldValue == null) { | |
| 114 | * // other code if not previously present | |
| 115 | * } | |
| 116 | * else { | |
| 117 | * // other code if it was previously present | |
| 118 | * } | |
| 119 | * | |
| 120 | * | |
| 121 | * </pre> | |
| 122 | * | |
| 123 | * <p> | |
| 124 | * | |
| 125 | * Iterators and Enumerations (i.e., those returned by keySet().iterator(), | |
| 126 | * entrySet().iterator(), values().iterator(), keys(), and elements()) return | |
| 127 | * elements reflecting the state of the hash table at some point at or since the | |
| 128 | * creation of the iterator/enumeration. They will return at most one instance | |
| 129 | * of each element (via next()/nextElement()), but might or might not reflect | |
| 130 | * puts and removes that have been processed since they were created. They do | |
| 131 | * <em>not</em> throw ConcurrentModificationException. However, these | |
| 132 | * iterators are designed to be used by only one thread at a time. Sharing an | |
| 133 | * iterator across multiple threads may lead to unpredictable results if the | |
| 134 | * table is being concurrently modified. Again, you can ensure interference-free | |
| 135 | * iteration by enclosing the iteration in a synchronized block. | |
| 136 | * <p> | |
| 137 | * | |
| 138 | * This class may be used as a direct replacement for any use of | |
| 139 | * java.util.Hashtable that does not depend on readers being blocked during | |
| 140 | * updates. Like Hashtable but unlike java.util.HashMap, this class does NOT | |
| 141 | * allow <tt>null</tt> to be used as a key or value. This class is also | |
| 142 | * typically faster than ConcurrentHashMap when there is usually only one thread | |
| 143 | * updating the table, but possibly many retrieving values from it. | |
| 144 | * <p> | |
| 145 | * | |
| 146 | * Implementation note: A slightly faster implementation of this class will be | |
| 147 | * possible once planned Java Memory Model revisions are in place. | |
| 148 | * | |
| 149 | * <p>[ <a | |
| 150 | * href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> | |
| 151 | * Introduction to this package. </a>] | |
| 152 | * | |
| 153 | */ | |
| 154 | ||
| 155 | class ConcurrentReaderHashMap extends AbstractMap implements Map, Cloneable, | |
| 156 | Serializable { | |
| 157 | ||
| 158 | /* | |
| 159 | * The basic strategy is an optimistic-style scheme based on the guarantee | |
| 160 | * that the hash table and its lists are always kept in a consistent enough | |
| 161 | * state to be read without locking: | |
| 162 | * | |
| 163 | * Read operations first proceed without locking, by traversing the | |
| 164 | * apparently correct list of the apparently correct bin. If an entry is | |
| 165 | * found, but not invalidated (value field null), it is returned. If not | |
| 166 | * found, operations must recheck (after a memory barrier) to make sure they | |
| 167 | * are using both the right list and the right table (which can change under | |
| 168 | * resizes). If invalidated, reads must acquire main update lock to wait out | |
| 169 | * the update, and then re-traverse. | |
| 170 | * | |
| 171 | * All list additions are at the front of each bin, making it easy to check | |
| 172 | * changes, and also fast to traverse. Entry next pointers are never | |
| 173 | * assigned. Remove() builds new nodes when necessary to preserve this. | |
| 174 | * | |
| 175 | * Remove() (also clear()) invalidates removed nodes to alert read | |
| 176 | * operations that they must wait out the full modifications. | |
| 177 | * | |
| 178 | */ | |
| 179 | ||
    /**
     * A Serializable class used solely as a lock object (see barrierLock).
     * It is Serializable so the enclosing map can be serialized even though
     * it holds a final reference to an instance of this class.
     */
    protected static class BarrierLock implements java.io.Serializable {
    }
| 183 | ||
    /**
     * Lock used only for its memory effects: writers record modifications
     * while synchronized on it, and readers synchronize on it (in
     * getTableForReading) so that the table reference and cells they read
     * are at least as fresh as the last recorded modification.
     */
    protected final BarrierLock barrierLock = new BarrierLock();

    /**
     * Field written to only to guarantee lock ordering; its value is never
     * read. The write inside the barrierLock synchronized block (see
     * recordModification) is what produces the visibility guarantee.
     */
    protected transient Object lastWrite;
| 194 | ||
    /**
     * Force a memory synchronization that will cause all readers to see
     * table. Call only when already holding main synch lock.
     *
     * The write to lastWrite under barrierLock pairs with the read in
     * getTableForReading(): releasing barrierLock here makes everything
     * written beforehand visible to any thread that later acquires it.
     *
     * @param x the just-modified structure (entry, table, ...); stored only
     *          for its happens-before effect, never read back.
     */
    protected final void recordModification(Object x) {
        synchronized (barrierLock) {
            lastWrite = x;
        }
    }
| 204 | ||
    /**
     * Get ref to table; the reference and the cells it accesses will be at
     * least as fresh as from last use of barrierLock. Pairs with
     * recordModification(), which writes under the same lock.
     *
     * @return the current hash table array, read under the barrier lock.
     */
    protected final Entry[] getTableForReading() {
        synchronized (barrierLock) {
            return table;
        }
    }
| 214 | ||
    /**
     * The default initial number of table slots for this table (32). Used
     * when not otherwise specified in constructor.
     *
     * NOTE(review): public, static and non-final, so it can be reassigned
     * globally by client code; left mutable to preserve the existing
     * public interface.
     */
    public static int DEFAULT_INITIAL_CAPACITY = 32;

    /**
     * The minimum capacity, used if a lower value is implicitly specified by
     * either of the constructors with arguments. MUST be a power of two.
     */
    private static final int MINIMUM_CAPACITY = 4;

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments. MUST be a power of two
     * no greater than 1&lt;&lt;30.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default load factor for this table (0.75). Used when not otherwise
     * specified in constructor. (An earlier comment said 1.0, which did not
     * match the actual value.)
     */
    public static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The hash table data: each slot holds the head of a singly-linked list
     * of Entry nodes. The array is replaced wholesale on rehash, so readers
     * may briefly hold a stale reference; reads re-check via
     * getTableForReading().
     */
    protected transient Entry[] table;

    /**
     * The total number of mappings in the hash table. Updated only while
     * holding the main (this) lock.
     */
    protected transient int count;

    /**
     * The table is rehashed when its size exceeds this threshold. (The value
     * of this field is always (int)(capacity * loadFactor).)
     *
     * @serial
     */
    protected int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    protected float loadFactor;
| 265 | ||
| 266 | /** | |
| 267 | * Returns the appropriate capacity (power of two) for the specified initial | |
| 268 | * capacity argument. | |
| 269 | */ | |
| 270 | 10476 | private int p2capacity(int initialCapacity) { |
| 271 | 10476 | int cap = initialCapacity; |
| 272 | ||
| 273 | // Compute the appropriate capacity | |
| 274 | 10476 | int result; |
| 275 | 10476 | if (cap > MAXIMUM_CAPACITY || cap < 0) { |
| 276 | 0 | result = MAXIMUM_CAPACITY; |
| 277 | } else { | |
| 278 | 10476 | result = MINIMUM_CAPACITY; |
| 279 | 10476 | while (result < cap) |
| 280 | 31428 | result <<= 1; |
| 281 | } | |
| 282 | 10476 | return result; |
| 283 | } | |
| 284 | ||
| 285 | /** | |
| 286 | * Return hash code for Object x. Since we are using power-of-two tables, it | |
| 287 | * is worth the effort to improve hashcode via the same multiplicative | |
| 288 | * scheme as used in IdentityHashMap. | |
| 289 | */ | |
| 290 | 2283990 | private static int hash(Object x) { |
| 291 | 2283990 | int h = x.hashCode(); |
| 292 | // Multiply by 127 (quickly, via shifts), and mix in some high | |
| 293 | // bits to help guard against bunching of codes that are | |
| 294 | // consecutive or equally spaced. | |
| 295 | 2283990 | return ((h << 7) - h + (h >>> 9) + (h >>> 17)); |
| 296 | } | |
| 297 | ||
| 298 | /** | |
| 299 | * Check for equality of non-null references x and y. | |
| 300 | */ | |
| 301 | 2222118 | protected boolean eq(Object x, Object y) { |
| 302 | 2222118 | return x == y || x.equals(y); |
| 303 | } | |
| 304 | ||
| 305 | /** | |
| 306 | * Constructs a new, empty map with the specified initial capacity and the | |
| 307 | * specified load factor. | |
| 308 | * | |
| 309 | * @param initialCapacity | |
| 310 | * the initial capacity The actual initial capacity is rounded to | |
| 311 | * the nearest power of two. | |
| 312 | * @param loadFactor | |
| 313 | * the load factor of the ConcurrentReaderHashMap | |
| 314 | * @throws IllegalArgumentException | |
| 315 | * if the initial maximum number of elements is less than zero, | |
| 316 | * or if the load factor is nonpositive. | |
| 317 | */ | |
| 318 | ||
| 319 | 10476 | public ConcurrentReaderHashMap(int initialCapacity, float loadFactor) { |
| 320 | 10476 | if (loadFactor <= 0) |
| 321 | 0 | throw new IllegalArgumentException("Illegal Load factor: " |
| 322 | + loadFactor); | |
| 323 | 10476 | this.loadFactor = loadFactor; |
| 324 | ||
| 325 | 10476 | int cap = p2capacity(initialCapacity); |
| 326 | ||
| 327 | 10476 | table = new Entry[cap]; |
| 328 | 10476 | threshold = (int) (cap * loadFactor); |
| 329 | } | |
| 330 | ||
    /**
     * Constructs a new, empty map with the specified initial capacity and
     * the default load factor (0.75).
     *
     * @param initialCapacity
     *            the initial capacity of the ConcurrentReaderHashMap.
     * @throws IllegalArgumentException
     *             if the initial maximum number of elements is less than
     *             zero.
     */
    public ConcurrentReaderHashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }
| 344 | ||
    /**
     * Constructs a new, empty map with the default initial capacity (32)
     * and the default load factor (0.75).
     */
    public ConcurrentReaderHashMap() {
        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
    }
| 353 | ||
    /**
     * Constructs a new map with the same mappings as the given map. The map
     * is created with a capacity sized so the given map's entries fit
     * without triggering a resize (size / 0.75 + 1, but at least 16), and
     * the default load factor.
     *
     * @param t
     *            the map whose mappings are copied into this one.
     */
    public ConcurrentReaderHashMap(Map t) {
        this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1, 16),
                DEFAULT_LOAD_FACTOR);
        putAll(t);
    }
| 365 | ||
    /**
     * Returns the number of key-value mappings in this map. Synchronized so
     * the returned count reflects all completed modifications.
     *
     * @return the number of key-value mappings in this map.
     */
    public synchronized int size() {
        return count;
    }
| 375 | ||
    /**
     * Returns <tt>true</tt> if this map contains no key-value mappings.
     * Synchronized so the answer reflects all completed modifications.
     *
     * @return <tt>true</tt> if this map contains no key-value mappings.
     */
    public synchronized boolean isEmpty() {
        return count == 0;
    }
| 385 | ||
    /**
     * Returns the value to which the specified key is mapped in this table.
     * Successful lookups usually complete without locking; see the retry
     * logic below for the unsuccessful/contended cases.
     *
     * @param key
     *            a key in the table.
     * @return the value to which the key is mapped in this table;
     *         <code>null</code> if the key is not mapped to any value in
     *         this table.
     * @exception NullPointerException
     *                if the key is <code>null</code>.
     * @see #put(Object, Object)
     */
    public Object get(Object key) {

        // hash(key) dereferences key, so a null key throws NPE here
        int hash = hash(key);

        /*
         * Start off at the apparently correct bin. If entry is found, we
         * need to check after a barrier anyway. If not found, we need a
         * barrier to check if we are actually in right bin. So either way,
         * we encounter only one barrier unless we need to retry. And we
         * only need to fully synchronize if there have been concurrent
         * modifications.
         */

        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {

                // If key apparently not there, re-read the table under the
                // barrier lock to make sure this was a valid read

                Entry[] reread = getTableForReading();
                if (tab == reread && first == tab[index])
                    return null;
                else {
                    // Wrong list -- must restart traversal at new first
                    tab = reread;
                    e = first = tab[index = hash & (tab.length - 1)];
                }

            }

            else if (e.hash == hash && eq(key, e.key)) {
                Object value = e.value;
                if (value != null)
                    return value;

                // Entry was invalidated during deletion. But it could
                // have been re-inserted, so we must retraverse.
                // To avoid useless contention, get lock to wait out
                // modifications before retraversing.

                synchronized (this) {
                    tab = table;
                }
                e = first = tab[index = hash & (tab.length - 1)];

            } else
                e = e.next;
        }
    }
| 454 | ||
    /**
     * Tests if the specified object is a key in this table. Equivalent to
     * <code>get(key) != null</code>, which is exact here because null
     * values are never stored.
     *
     * @param key
     *            possible key.
     * @return <code>true</code> if and only if the specified object is a
     *         key in this table, as determined by the <tt>equals</tt>
     *         method; <code>false</code> otherwise.
     * @exception NullPointerException
     *                if the key is <code>null</code>.
     * @see #contains(Object)
     */
    public boolean containsKey(Object key) {
        return get(key) != null;
    }
| 471 | ||
    /**
     * Maps the specified <code>key</code> to the specified
     * <code>value</code> in this table. Neither the key nor the value can
     * be <code>null</code>.
     * <p>
     *
     * The search for an existing entry runs without the lock; the actual
     * insertion/replacement is then performed under the main lock, after
     * re-checking that the unlocked traversal saw the current table and
     * bin (falling back to sput() on interference).
     *
     * @param key
     *            the table key.
     * @param value
     *            the value.
     * @return the previous value of the specified key in this table, or
     *         <code>null</code> if it did not have one.
     * @exception NullPointerException
     *                if the key or value is <code>null</code>.
     * @see Object#equals(Object)
     * @see #get(Object)
     */
    public Object put(Object key, Object value) {
        if (value == null)
            throw new NullPointerException();

        // hash(key) dereferences key, so a null key throws NPE here
        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e;

        // Unlocked scan for an existing entry with this key
        for (e = first; e != null; e = e.next)
            if (e.hash == hash && eq(key, e.key))
                break;

        synchronized (this) {
            if (tab == table) {
                if (e == null) {
                    // make sure we are adding to correct list
                    if (first == tab[index]) {
                        // Add to front of list
                        Entry newEntry = new Entry(hash, key, value, first);
                        tab[index] = newEntry;
                        if (++count >= threshold)
                            rehash();
                        else
                            recordModification(newEntry);
                        return null;
                    }
                } else {
                    // oldValue == null means e was invalidated by a
                    // concurrent remove; must retry via sput
                    Object oldValue = e.value;
                    if (first == tab[index] && oldValue != null) {
                        e.value = value;
                        return oldValue;
                    }
                }
            }

            // retry if wrong list or lost race against concurrent remove
            return sput(key, value, hash);
        }
    }
| 533 | ||
    /**
     * Continuation of put(), called only when synch lock is held and
     * interference has been detected. Re-traverses the (now stable) current
     * bin and performs the insertion or replacement.
     *
     * @param key the key being inserted (non-null).
     * @param value the value to associate (non-null).
     * @param hash the already-computed spread hash of key.
     * @return the previous value, or null if the key was absent.
     */
    protected Object sput(Object key, Object value, int hash) {

        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // Key absent: link a new entry at the front of the bin
                Entry newEntry = new Entry(hash, key, value, first);
                tab[index] = newEntry;
                if (++count >= threshold)
                    rehash();
                else
                    recordModification(newEntry);
                return null;
            } else if (e.hash == hash && eq(key, e.key)) {
                Object oldValue = e.value;
                e.value = value;
                return oldValue;
            } else
                e = e.next;
        }
    }
| 562 | ||
    /**
     * Rehashes the contents of this map into a new table with a larger
     * capacity (double the old one). This method is called automatically
     * when the number of keys in this map exceeds its capacity and load
     * factor. Caller must hold the main lock.
     */
    protected void rehash() {
        Entry[] oldTable = table;
        int oldCapacity = oldTable.length;
        if (oldCapacity >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE; // avoid retriggering
            return;
        }

        int newCapacity = oldCapacity << 1;
        int mask = newCapacity - 1;
        threshold = (int) (newCapacity * loadFactor);

        Entry[] newTable = new Entry[newCapacity];
        /*
         * Reclassify nodes in each list to new Map. Because we are using
         * power-of-two expansion, the elements from each bin must either
         * stay at same index, or move to oldCapacity+index. We also
         * eliminate unnecessary node creation by catching cases where old
         * nodes can be reused because their next fields won't change.
         * Statistically, at the default threshold, only about one-sixth of
         * them need cloning. (The nodes they replace will be garbage
         * collectable as soon as they are no longer referenced by any
         * reader thread that may be in the midst of traversing table right
         * now.)
         */

        for (int i = 0; i < oldCapacity; i++) {
            // We need to guarantee that any existing reads of old Map can
            // proceed. So we cannot yet null out each bin.
            Entry e = oldTable[i];

            if (e != null) {
                int idx = e.hash & mask;
                Entry next = e.next;

                // Single node on list
                if (next == null)
                    newTable[idx] = e;

                else {
                    // Reuse trailing consecutive sequence of all same bit
                    Entry lastRun = e;
                    int lastIdx = idx;
                    for (Entry last = next; last != null; last = last.next) {
                        int k = last.hash & mask;
                        if (k != lastIdx) {
                            lastIdx = k;
                            lastRun = last;
                        }
                    }
                    newTable[lastIdx] = lastRun;

                    // Clone all remaining nodes (those before lastRun),
                    // since their next pointers must change
                    for (Entry p = e; p != lastRun; p = p.next) {
                        int k = p.hash & mask;
                        newTable[k] = new Entry(p.hash, p.key, p.value,
                                newTable[k]);
                    }
                }
            }
        }

        table = newTable;
        recordModification(newTable);
    }
| 632 | ||
    /**
     * Removes the key (and its corresponding value) from this table. This
     * method does nothing if the key is not in the table.
     *
     * @param key
     *            the key that needs to be removed.
     * @return the value to which the key had been mapped in this table, or
     *         <code>null</code> if the key did not have a mapping.
     * @exception NullPointerException
     *                if the key is <code>null</code>.
     */
    public Object remove(Object key) {
        /*
         * Find the entry, then 1. Set value field to null, to force get()
         * to retry 2. Rebuild the list without this entry. All entries
         * following removed node can stay in list, but all preceding ones
         * need to be cloned. Traversals rely on this strategy to ensure
         * that elements will not be repeated during iteration.
         */

        // hash(key) dereferences key, so a null key throws NPE here
        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        // Unlocked scan for the entry to remove
        for (e = first; e != null; e = e.next)
            if (e.hash == hash && eq(key, e.key))
                break;

        synchronized (this) {
            if (tab == table) {
                if (e == null) {
                    // Absent, and the bin is unchanged: nothing to do
                    if (first == tab[index])
                        return null;
                } else {
                    Object oldValue = e.value;
                    if (first == tab[index] && oldValue != null) {
                        // Invalidate first (readers retry on null value),
                        // then rebuild the list without e
                        e.value = null;
                        count--;

                        Entry head = e.next;
                        for (Entry p = first; p != e; p = p.next)
                            head = new Entry(p.hash, p.key, p.value, head);

                        tab[index] = head;
                        recordModification(head);
                        return oldValue;
                    }
                }
            }

            // Wrong list or interference
            return sremove(key, hash);
        }
    }
| 690 | ||
    /**
     * Continuation of remove(), called only when synch lock is held and
     * interference has been detected. Re-traverses the current bin and
     * applies the same invalidate-then-rebuild protocol as remove().
     *
     * @param key the key being removed (non-null).
     * @param hash the already-computed spread hash of key.
     * @return the removed value, or null if the key was absent.
     */
    protected Object sremove(Object key, int hash) {
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];

        for (Entry e = first; e != null; e = e.next) {
            if (e.hash == hash && eq(key, e.key)) {
                // Invalidate first so concurrent get()s retry, then
                // rebuild the list prefix without this entry
                Object oldValue = e.value;
                e.value = null;
                count--;
                Entry head = e.next;
                for (Entry p = first; p != e; p = p.next)
                    head = new Entry(p.hash, p.key, p.value, head);

                tab[index] = head;
                recordModification(head);
                return oldValue;
            }
        }
        return null;
    }
| 717 | ||
| 718 | /** | |
| 719 | * Returns <tt>true</tt> if this map maps one or more keys to the | |
| 720 | * specified value. Note: This method requires a full internal traversal of | |
| 721 | * the hash table, and so is much slower than method <tt>containsKey</tt>. | |
| 722 | * | |
| 723 | * @param value | |
| 724 | * value whose presence in this map is to be tested. | |
| 725 | * @return <tt>true</tt> if this map maps one or more keys to the | |
| 726 | * specified value. | |
| 727 | * @exception NullPointerException | |
| 728 | * if the value is <code>null</code>. | |
| 729 | */ | |
| 730 | ||
| 731 | 0 | public boolean containsValue(Object value) { |
| 732 | 0 | if (value == null) |
| 733 | 0 | throw new NullPointerException(); |
| 734 | ||
| 735 | 0 | Entry tab[] = getTableForReading(); |
| 736 | ||
| 737 | 0 | for (int i = 0; i < tab.length; ++i) { |
| 738 | 0 | for (Entry e = tab[i]; e != null; e = e.next) |
| 739 | 0 | if (value.equals(e.value)) |
| 740 | 0 | return true; |
| 741 | } | |
| 742 | ||
| 743 | 0 | return false; |
| 744 | } | |
| 745 | ||
    /**
     * Tests if some key maps into the specified value in this table. This
     * operation is more expensive than the <code>containsKey</code> method.
     * <p>
     *
     * Note that this method is identical in functionality to containsValue,
     * (which is part of the Map interface in the collections framework);
     * it exists for Hashtable compatibility.
     *
     * @param value
     *            a value to search for.
     * @return <code>true</code> if and only if some key maps to the
     *         <code>value</code> argument in this table as determined by
     *         the <tt>equals</tt> method; <code>false</code> otherwise.
     * @exception NullPointerException
     *                if the value is <code>null</code>.
     * @see #containsKey(Object)
     * @see #containsValue(Object)
     * @see Map
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }
| 769 | ||
| 770 | /** | |
| 771 | * Copies all of the mappings from the specified map to this one. | |
| 772 | * | |
| 773 | * These mappings replace any mappings that this map had for any of the keys | |
| 774 | * currently in the specified Map. | |
| 775 | * | |
| 776 | * @param t | |
| 777 | * Mappings to be stored in this map. | |
| 778 | */ | |
| 779 | ||
| 780 | 0 | public synchronized void putAll(Map t) { |
| 781 | 0 | int n = t.size(); |
| 782 | 0 | if (n == 0) |
| 783 | 0 | return; |
| 784 | ||
| 785 | // Expand enough to hold at least n elements without resizing. | |
| 786 | // We can only resize table by factor of two at a time. | |
| 787 | // It is faster to rehash with fewer elements, so do it now. | |
| 788 | 0 | while (n >= threshold) |
| 789 | 0 | rehash(); |
| 790 | ||
| 791 | 0 | for (Iterator it = t.entrySet().iterator(); it.hasNext();) { |
| 792 | 0 | Map.Entry entry = (Map.Entry) it.next(); |
| 793 | 0 | Object key = entry.getKey(); |
| 794 | 0 | Object value = entry.getValue(); |
| 795 | 0 | put(key, value); |
| 796 | } | |
| 797 | } | |
| 798 | ||
    /**
     * Removes all mappings from this map.
     *
     * Writers are excluded by the method-level lock; concurrent readers are
     * handled by nulling out each entry's volatile value field before the
     * bucket itself is cleared, so an in-flight get() observes a removed
     * entry rather than a stale value.
     */
    public synchronized void clear() {
        Entry tab[] = table;
        for (int i = 0; i < tab.length; ++i) {

            // must invalidate all to force concurrent get's to wait and then
            // retry
            for (Entry e = tab[i]; e != null; e = e.next)
                e.value = null;

            tab[i] = null;
        }
        count = 0;
        // Publishes the structural change for lock-free readers
        // (recordModification is defined elsewhere in this class).
        recordModification(tab);
    }
| 816 | ||
| 817 | /** | |
| 818 | * Returns a shallow copy of this <tt>ConcurrentReaderHashMap</tt> | |
| 819 | * instance: the keys and values themselves are not cloned. | |
| 820 | * | |
| 821 | * @return a shallow copy of this map. | |
| 822 | */ | |
| 823 | ||
| 824 | 0 | public synchronized Object clone() { |
| 825 | 0 | try { |
| 826 | 0 | ConcurrentReaderHashMap t = (ConcurrentReaderHashMap) super.clone(); |
| 827 | ||
| 828 | 0 | t.keySet = null; |
| 829 | 0 | t.entrySet = null; |
| 830 | 0 | t.values = null; |
| 831 | ||
| 832 | 0 | Entry[] tab = table; |
| 833 | 0 | t.table = new Entry[tab.length]; |
| 834 | 0 | Entry[] ttab = t.table; |
| 835 | ||
| 836 | 0 | for (int i = 0; i < tab.length; ++i) { |
| 837 | 0 | Entry first = null; |
| 838 | 0 | for (Entry e = tab[i]; e != null; e = e.next) |
| 839 | 0 | first = new Entry(e.hash, e.key, e.value, first); |
| 840 | 0 | ttab[i] = first; |
| 841 | } | |
| 842 | ||
| 843 | 0 | return t; |
| 844 | } catch (CloneNotSupportedException e) { | |
| 845 | // this shouldn't happen, since we are Cloneable | |
| 846 | 0 | throw new InternalError(); |
| 847 | } | |
| 848 | } | |
| 849 | ||
    // Views

    // Lazily created view collections; each is built at most once per map
    // (benign race: unsynchronized readers may briefly create duplicates).
    protected transient Set keySet = null;

    protected transient Set entrySet = null;

    protected transient Collection values = null;
| 857 | ||
| 858 | /** | |
| 859 | * Returns a set view of the keys contained in this map. The set is backed | |
| 860 | * by the map, so changes to the map are reflected in the set, and | |
| 861 | * vice-versa. The set supports element removal, which removes the | |
| 862 | * corresponding mapping from this map, via the <tt>Iterator.remove</tt>, | |
| 863 | * <tt>Set.remove</tt>,<tt>removeAll</tt>,<tt>retainAll</tt>, and | |
| 864 | * <tt>clear</tt> operations. It does not support the <tt>add</tt> or | |
| 865 | * <tt>addAll</tt> operations. | |
| 866 | * | |
| 867 | * @return a set view of the keys contained in this map. | |
| 868 | */ | |
| 869 | ||
| 870 | 0 | public Set keySet() { |
| 871 | 0 | Set ks = keySet; |
| 872 | 0 | return (ks != null) ? ks : (keySet = new KeySet()); |
| 873 | } | |
| 874 | ||
| 875 | private class KeySet extends AbstractSet { | |
| 876 | 0 | public Iterator iterator() { |
| 877 | 0 | return new KeyIterator(); |
| 878 | } | |
| 879 | ||
| 880 | 0 | public int size() { |
| 881 | 0 | return ConcurrentReaderHashMap.this.size(); |
| 882 | } | |
| 883 | ||
| 884 | 0 | public boolean contains(Object o) { |
| 885 | 0 | return ConcurrentReaderHashMap.this.containsKey(o); |
| 886 | } | |
| 887 | ||
| 888 | 0 | public boolean remove(Object o) { |
| 889 | 0 | return ConcurrentReaderHashMap.this.remove(o) != null; |
| 890 | } | |
| 891 | ||
| 892 | 0 | public void clear() { |
| 893 | 0 | ConcurrentReaderHashMap.this.clear(); |
| 894 | } | |
| 895 | } | |
| 896 | ||
| 897 | /** | |
| 898 | * Returns a collection view of the values contained in this map. The | |
| 899 | * collection is backed by the map, so changes to the map are reflected in | |
| 900 | * the collection, and vice-versa. The collection supports element removal, | |
| 901 | * which removes the corresponding mapping from this map, via the | |
| 902 | * <tt>Iterator.remove</tt>,<tt>Collection.remove</tt>, | |
| 903 | * <tt>removeAll</tt>,<tt>retainAll</tt>, and <tt>clear</tt> | |
| 904 | * operations. It does not support the <tt>add</tt> or <tt>addAll</tt> | |
| 905 | * operations. | |
| 906 | * | |
| 907 | * @return a collection view of the values contained in this map. | |
| 908 | */ | |
| 909 | ||
| 910 | 0 | public Collection values() { |
| 911 | 0 | Collection vs = values; |
| 912 | 0 | return (vs != null) ? vs : (values = new Values()); |
| 913 | } | |
| 914 | ||
| 915 | private class Values extends AbstractCollection { | |
| 916 | 0 | public Iterator iterator() { |
| 917 | 0 | return new ValueIterator(); |
| 918 | } | |
| 919 | ||
| 920 | 0 | public int size() { |
| 921 | 0 | return ConcurrentReaderHashMap.this.size(); |
| 922 | } | |
| 923 | ||
| 924 | 0 | public boolean contains(Object o) { |
| 925 | 0 | return ConcurrentReaderHashMap.this.containsValue(o); |
| 926 | } | |
| 927 | ||
| 928 | 0 | public void clear() { |
| 929 | 0 | ConcurrentReaderHashMap.this.clear(); |
| 930 | } | |
| 931 | } | |
| 932 | ||
| 933 | /** | |
| 934 | * Returns a collection view of the mappings contained in this map. Each | |
| 935 | * element in the returned collection is a <tt>Map.Entry</tt>. The | |
| 936 | * collection is backed by the map, so changes to the map are reflected in | |
| 937 | * the collection, and vice-versa. The collection supports element removal, | |
| 938 | * which removes the corresponding mapping from the map, via the | |
| 939 | * <tt>Iterator.remove</tt>,<tt>Collection.remove</tt>, | |
| 940 | * <tt>removeAll</tt>,<tt>retainAll</tt>, and <tt>clear</tt> | |
| 941 | * operations. It does not support the <tt>add</tt> or <tt>addAll</tt> | |
| 942 | * operations. | |
| 943 | * | |
| 944 | * @return a collection view of the mappings contained in this map. | |
| 945 | */ | |
| 946 | ||
| 947 | 0 | public Set entrySet() { |
| 948 | 0 | Set es = entrySet; |
| 949 | 0 | return (es != null) ? es : (entrySet = new EntrySet()); |
| 950 | } | |
| 951 | ||
| 952 | private class EntrySet extends AbstractSet { | |
| 953 | 0 | public Iterator iterator() { |
| 954 | 0 | return new HashIterator(); |
| 955 | } | |
| 956 | ||
| 957 | 0 | public boolean contains(Object o) { |
| 958 | 0 | if (!(o instanceof Map.Entry)) |
| 959 | 0 | return false; |
| 960 | 0 | Map.Entry entry = (Map.Entry) o; |
| 961 | 0 | Object v = ConcurrentReaderHashMap.this.get(entry.getKey()); |
| 962 | 0 | return v != null && v.equals(entry.getValue()); |
| 963 | } | |
| 964 | ||
| 965 | 0 | public boolean remove(Object o) { |
| 966 | 0 | if (!(o instanceof Map.Entry)) |
| 967 | 0 | return false; |
| 968 | 0 | return ConcurrentReaderHashMap.this |
| 969 | .findAndRemoveEntry((Map.Entry) o); | |
| 970 | } | |
| 971 | ||
| 972 | 0 | public int size() { |
| 973 | 0 | return ConcurrentReaderHashMap.this.size(); |
| 974 | } | |
| 975 | ||
| 976 | 0 | public void clear() { |
| 977 | 0 | ConcurrentReaderHashMap.this.clear(); |
| 978 | } | |
| 979 | } | |
| 980 | ||
| 981 | /** | |
| 982 | * Helper method for entrySet.remove | |
| 983 | */ | |
| 984 | 0 | protected synchronized boolean findAndRemoveEntry(Map.Entry entry) { |
| 985 | 0 | Object key = entry.getKey(); |
| 986 | 0 | Object v = get(key); |
| 987 | 0 | if (v != null && v.equals(entry.getValue())) { |
| 988 | 0 | remove(key); |
| 989 | 0 | return true; |
| 990 | } else | |
| 991 | 0 | return false; |
| 992 | } | |
| 993 | ||
    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table.
     * @see Enumeration
     * @see #elements()
     * @see #keySet()
     * @see Map
     */
    public Enumeration keys() {
        // KeyIterator implements both Iterator and Enumeration.
        return new KeyIterator();
    }
| 1006 | ||
    /**
     * Returns an enumeration of the values in this table. Use the Enumeration
     * methods on the returned object to fetch the elements sequentially.
     *
     * @return an enumeration of the values in this table.
     * @see java.util.Enumeration
     * @see #keys()
     * @see #values()
     * @see Map
     */
    public Enumeration elements() {
        // ValueIterator implements both Iterator and Enumeration.
        return new ValueIterator();
    }
| 1021 | ||
| 1022 | /** | |
| 1023 | * ConcurrentReaderHashMap collision list entry. | |
| 1024 | */ | |
| 1025 | ||
| 1026 | protected static class Entry implements Map.Entry { | |
| 1027 | ||
| 1028 | /* | |
| 1029 | * The use of volatile for value field ensures that we can detect status | |
| 1030 | * changes without synchronization. The other fields are never changed, | |
| 1031 | * and are marked as final. | |
| 1032 | */ | |
| 1033 | ||
| 1034 | protected final int hash; | |
| 1035 | ||
| 1036 | protected final Object key; | |
| 1037 | ||
| 1038 | protected final Entry next; | |
| 1039 | ||
| 1040 | protected volatile Object value; | |
| 1041 | ||
| 1042 | 22740 | Entry(int hash, Object key, Object value, Entry next) { |
| 1043 | 22740 | this.hash = hash; |
| 1044 | 22740 | this.key = key; |
| 1045 | 22740 | this.next = next; |
| 1046 | 22740 | this.value = value; |
| 1047 | } | |
| 1048 | ||
| 1049 | // Map.Entry Ops | |
| 1050 | ||
| 1051 | 0 | public Object getKey() { |
| 1052 | 0 | return key; |
| 1053 | } | |
| 1054 | ||
| 1055 | /** | |
| 1056 | * Get the value. Note: In an entrySet or entrySet.iterator, unless the | |
| 1057 | * set or iterator is used under synchronization of the table as a whole | |
| 1058 | * (or you can otherwise guarantee lack of concurrent modification), | |
| 1059 | * <tt>getValue</tt> <em>might</em> return null, reflecting the fact | |
| 1060 | * that the entry has been concurrently removed. However, there are no | |
| 1061 | * assurances that concurrent removals will be reflected using this | |
| 1062 | * method. | |
| 1063 | * | |
| 1064 | * @return the current value, or null if the entry has been detectably | |
| 1065 | * removed. | |
| 1066 | */ | |
| 1067 | 0 | public Object getValue() { |
| 1068 | 0 | return value; |
| 1069 | } | |
| 1070 | ||
| 1071 | /** | |
| 1072 | * Set the value of this entry. Note: In an entrySet or | |
| 1073 | * entrySet.iterator), unless the set or iterator is used under | |
| 1074 | * synchronization of the table as a whole (or you can otherwise | |
| 1075 | * guarantee lack of concurrent modification), <tt>setValue</tt> is | |
| 1076 | * not strictly guaranteed to actually replace the value field obtained | |
| 1077 | * via the <tt>get</tt> operation of the underlying hash table in | |
| 1078 | * multithreaded applications. If iterator-wide synchronization is not | |
| 1079 | * used, and any other concurrent <tt>put</tt> or <tt>remove</tt> | |
| 1080 | * operations occur, sometimes even to <em>other</em> entries, then | |
| 1081 | * this change is not guaranteed to be reflected in the hash table. (It | |
| 1082 | * might, or it might not. There are no assurances either way.) | |
| 1083 | * | |
| 1084 | * @param value | |
| 1085 | * the new value. | |
| 1086 | * @return the previous value, or null if entry has been detectably | |
| 1087 | * removed. | |
| 1088 | * @exception NullPointerException | |
| 1089 | * if the value is <code>null</code>. | |
| 1090 | * | |
| 1091 | */ | |
| 1092 | ||
| 1093 | 0 | public Object setValue(Object value) { |
| 1094 | 0 | if (value == null) |
| 1095 | 0 | throw new NullPointerException(); |
| 1096 | 0 | Object oldValue = this.value; |
| 1097 | 0 | this.value = value; |
| 1098 | 0 | return oldValue; |
| 1099 | } | |
| 1100 | ||
| 1101 | 0 | public boolean equals(Object o) { |
| 1102 | 0 | if (!(o instanceof Map.Entry)) |
| 1103 | 0 | return false; |
| 1104 | 0 | Map.Entry e = (Map.Entry) o; |
| 1105 | 0 | return (key.equals(e.getKey()) && value.equals(e.getValue())); |
| 1106 | } | |
| 1107 | ||
| 1108 | 0 | public int hashCode() { |
| 1109 | 0 | return key.hashCode() ^ value.hashCode(); |
| 1110 | } | |
| 1111 | ||
| 1112 | 0 | public String toString() { |
| 1113 | 0 | return key + "=" + value; |
| 1114 | } | |
| 1115 | ||
| 1116 | } | |
| 1117 | ||
    /**
     * Base iterator over the table's entries, doubling as a legacy
     * Enumeration. It works against a snapshot of the bucket array and walks
     * the slots from the highest index down. Subclasses override
     * returnValueOfNext() to yield keys or values instead of entries.
     */
    protected class HashIterator implements Iterator, Enumeration {
        protected final Entry[] tab; // snapshot of table

        protected int index; // current slot

        protected Entry entry = null; // current node of slot

        protected Object currentKey; // key for current node

        protected Object currentValue; // value for current node

        protected Entry lastReturned = null; // last node returned by next

        protected HashIterator() {
            // getTableForReading (defined elsewhere in this class) supplies a
            // consistent reference to the bucket array for lock-free reads.
            tab = ConcurrentReaderHashMap.this.getTableForReading();
            index = tab.length - 1;
        }

        public boolean hasMoreElements() {
            return hasNext();
        }

        public Object nextElement() {
            return next();
        }

        public boolean hasNext() {

            /*
             * currentkey and currentValue are set here to ensure that next()
             * returns normally if hasNext() returns true. This avoids surprises
             * especially when final element is removed during traversal --
             * instead, we just ignore the removal during current traversal.
             */

            for (;;) {
                if (entry != null) {
                    // A null value marks a concurrently removed entry; skip
                    // it and continue down the collision chain.
                    Object v = entry.value;
                    if (v != null) {
                        currentKey = entry.key;
                        currentValue = v;
                        return true;
                    } else
                        entry = entry.next;
                }

                // Advance to the next non-empty bucket, if any.
                while (entry == null && index >= 0)
                    entry = tab[index--];

                if (entry == null) {
                    currentKey = currentValue = null;
                    return false;
                }
            }
        }

        // Hook for subclasses: by default the entry itself is returned
        // (as required by entrySet().iterator()).
        protected Object returnValueOfNext() {
            return entry;
        }

        public Object next() {
            // currentKey is pre-loaded by hasNext(); a null means hasNext()
            // has not been called (or returned false) since the last next().
            if (currentKey == null && !hasNext())
                throw new NoSuchElementException();

            Object result = returnValueOfNext();
            lastReturned = entry;
            currentKey = currentValue = null;
            entry = entry.next;
            return result;
        }

        public void remove() {
            if (lastReturned == null)
                throw new IllegalStateException();
            ConcurrentReaderHashMap.this.remove(lastReturned.key);
            lastReturned = null;
        }

    }
| 1197 | ||
    /** Iterator/Enumeration over the map's keys. */
    protected class KeyIterator extends HashIterator {
        protected Object returnValueOfNext() {
            // Pre-loaded by HashIterator.hasNext().
            return currentKey;
        }
    }
| 1203 | ||
    /** Iterator/Enumeration over the map's values. */
    protected class ValueIterator extends HashIterator {
        protected Object returnValueOfNext() {
            // Pre-loaded by HashIterator.hasNext().
            return currentValue;
        }
    }
| 1209 | ||
| 1210 | /** | |
| 1211 | * Save the state of the <tt>ConcurrentReaderHashMap</tt> instance to a | |
| 1212 | * stream (i.e., serialize it). | |
| 1213 | * | |
| 1214 | * @serialData The <i>capacity </i> of the ConcurrentReaderHashMap (the | |
| 1215 | * length of the bucket array) is emitted (int), followed by the | |
| 1216 | * <i>size </i> of the ConcurrentReaderHashMap (the number of | |
| 1217 | * key-value mappings), followed by the key (Object) and value | |
| 1218 | * (Object) for each key-value mapping represented by the | |
| 1219 | * ConcurrentReaderHashMap The key-value mappings are emitted in | |
| 1220 | * no particular order. | |
| 1221 | */ | |
| 1222 | ||
| 1223 | 0 | private synchronized void writeObject(java.io.ObjectOutputStream s) |
| 1224 | throws IOException { | |
| 1225 | // Write out the threshold, loadfactor, and any hidden stuff | |
| 1226 | 0 | s.defaultWriteObject(); |
| 1227 | ||
| 1228 | // Write out number of buckets | |
| 1229 | 0 | s.writeInt(table.length); |
| 1230 | ||
| 1231 | // Write out size (number of Mappings) | |
| 1232 | 0 | s.writeInt(count); |
| 1233 | ||
| 1234 | // Write out keys and values (alternating) | |
| 1235 | 0 | for (int index = table.length - 1; index >= 0; index--) { |
| 1236 | 0 | Entry entry = table[index]; |
| 1237 | ||
| 1238 | 0 | while (entry != null) { |
| 1239 | 0 | s.writeObject(entry.key); |
| 1240 | 0 | s.writeObject(entry.value); |
| 1241 | 0 | entry = entry.next; |
| 1242 | } | |
| 1243 | } | |
| 1244 | } | |
| 1245 | ||
| 1246 | /** | |
| 1247 | * Reconstitute the <tt>ConcurrentReaderHashMap</tt> instance from a | |
| 1248 | * stream (i.e., deserialize it). | |
| 1249 | */ | |
| 1250 | 0 | private synchronized void readObject(java.io.ObjectInputStream s) |
| 1251 | throws IOException, ClassNotFoundException { | |
| 1252 | // Read in the threshold, loadfactor, and any hidden stuff | |
| 1253 | 0 | s.defaultReadObject(); |
| 1254 | ||
| 1255 | // Read in number of buckets and allocate the bucket array; | |
| 1256 | 0 | int numBuckets = s.readInt(); |
| 1257 | 0 | table = new Entry[numBuckets]; |
| 1258 | ||
| 1259 | // Read in size (number of Mappings) | |
| 1260 | 0 | int size = s.readInt(); |
| 1261 | ||
| 1262 | // Read the keys and values, and put the mappings in the table | |
| 1263 | 0 | for (int i = 0; i < size; i++) { |
| 1264 | 0 | Object key = s.readObject(); |
| 1265 | 0 | Object value = s.readObject(); |
| 1266 | 0 | put(key, value); |
| 1267 | } | |
| 1268 | } | |
| 1269 | ||
    /**
     * Return the number of slots in this table.
     *
     * @return the current length of the bucket array.
     */
    public synchronized int capacity() {
        return table.length;
    }
| 1277 | ||
    /**
     * Return the load factor.
     *
     * @return the load factor this table was constructed with.
     */
    public float loadFactor() {
        return loadFactor;
    }
| 1284 | } |
|
||||||||||