Clover coverage report -
Coverage timestamp: Mon Jan 17 2005 23:51:40 PST
file stats: LOC: 2,074   Methods: 88
NCLOC: 919   Classes: 5
 
 Source file Conditionals Statements Methods TOTAL
AbstractConcurrentReadCache.java 56.1% 58.4% 51.1% 56.9%
coverage coverage
 1   
 /*
 2   
  * Copyright (c) 2002-2003 by OpenSymphony
 3   
  * All rights reserved.
 4   
  */
 5   
 /*
 6   
         File: AbstractConcurrentReadCache
 7   
 
 8   
         Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
 9   
         which carries the following copyright:
 10   
 
 11   
                  * Copyright 1997 by Sun Microsystems, Inc.,
 12   
                  * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
 13   
                  * All rights reserved.
 14   
                  *
 15   
                  * This software is the confidential and proprietary information
 16   
                  * of Sun Microsystems, Inc. ("Confidential Information").  You
 17   
                  * shall not disclose such Confidential Information and shall use
 18   
                  * it only in accordance with the terms of the license agreement
 19   
                  * you entered into with Sun.
 20   
 
 21   
         This class is a modified version of ConcurrentReaderHashMap, which was written
 22   
         by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications where done
 23   
         by Pyxis Technologies. This is a base class for the OSCache module of the
 24   
         openSymphony project (www.opensymphony.com).
 25   
 
 26   
         History:
 27   
         Date       Who                What
 28   
         28oct1999  dl               Created
 29   
         14dec1999  dl               jmm snapshot
 30   
         19apr2000  dl               use barrierLock
 31   
         12jan2001  dl               public release
 32   
         Oct2001    abergevin@pyxis-tech.com
 33   
                                                                 Integrated persistence and outer algorithm support
 34   
 */
 35   
 package com.opensymphony.oscache.base.algorithm;
 36   
 
 37   
 
 38   
 /** OpenSymphony BEGIN */
 39   
 import com.opensymphony.oscache.base.CacheEntry;
 40   
 import com.opensymphony.oscache.base.persistence.CachePersistenceException;
 41   
 import com.opensymphony.oscache.base.persistence.PersistenceListener;
 42   
 
 43   
 import org.apache.commons.logging.Log;
 44   
 import org.apache.commons.logging.LogFactory;
 45   
 
 46   
 import java.io.IOException;
 47   
 import java.io.Serializable;
 48   
 
 49   
 import java.util.*;
 50   
 
 51   
 /**
 52   
  * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
 53   
  * Because reads are not limited to periods
 54   
  * without writes, a concurrent reader policy is weaker than a classic
 55   
  * reader/writer policy, but is generally faster and allows more
 56   
  * concurrency. This class is a good choice especially for tables that
 57   
  * are mainly created by one thread during the start-up phase of a
 58   
  * program, and from then on, are mainly read (with perhaps occasional
 59   
  * additions or removals) in many threads.  If you also need concurrency
 60   
  * among writes, consider instead using ConcurrentHashMap.
 61   
  * <p>
 62   
  *
 63   
  * Successful retrievals using get(key) and containsKey(key) usually
 64   
  * run without locking. Unsuccessful ones (i.e., when the key is not
 65   
  * present) do involve brief synchronization (locking).  Also, the
 66   
  * size and isEmpty methods are always synchronized.
 67   
  *
 68   
  * <p> Because retrieval operations can ordinarily overlap with
 69   
  * writing operations (i.e., put, remove, and their derivatives),
 70   
  * retrievals can only be guaranteed to return the results of the most
 71   
  * recently <em>completed</em> operations holding upon their
 72   
  * onset. Retrieval operations may or may not return results
 73   
  * reflecting in-progress writing operations.  However, the retrieval
 74   
  * operations do always return consistent results -- either those
 75   
  * holding before any single modification or after it, but never a
 76   
  * nonsense result.  For aggregate operations such as putAll and
 77   
  * clear, concurrent reads may reflect insertion or removal of only
 78   
  * some entries. In those rare contexts in which you use a hash table
 79   
  * to synchronize operations across threads (for example, to prevent
 80   
  * reads until after clears), you should either encase operations
 81   
  * in synchronized blocks, or instead use java.util.Hashtable.
 82   
  *
 83   
  * <p>
 84   
  *
 85   
  * This class also supports optional guaranteed
 86   
  * exclusive reads, simply by surrounding a call within a synchronized
 87   
  * block, as in <br>
 88   
  * <code>AbstractConcurrentReadCache t; ... Object v; <br>
 89   
  * synchronized(t) { v = t.get(k); } </code> <br>
 90   
  *
 91   
  * But this is not usually necessary in practice. For
 92   
  * example, it is generally inefficient to write:
 93   
  *
 94   
  * <pre>
 95   
  *   AbstractConcurrentReadCache t; ...            // Inefficient version
 96   
  *   Object key; ...
 97   
  *   Object value; ...
 98   
  *   synchronized(t) {
 99   
  *     if (!t.containsKey(key))
 100   
  *       t.put(key, value);
 101   
  *       // other code if not previously present
 102   
  *     }
 103   
  *     else {
 104   
  *       // other code if it was previously present
 105   
  *     }
 106   
  *   }
 107   
  *</pre>
 108   
  * Instead, just take advantage of the fact that put returns
 109   
  * null if the key was not previously present:
 110   
  * <pre>
 111   
  *   AbstractConcurrentReadCache t; ...                // Use this instead
 112   
  *   Object key; ...
 113   
  *   Object value; ...
 114   
  *   Object oldValue = t.put(key, value);
 115   
  *   if (oldValue == null) {
 116   
  *     // other code if not previously present
 117   
  *   }
 118   
  *   else {
 119   
  *     // other code if it was previously present
 120   
  *   }
 121   
  *</pre>
 122   
  * <p>
 123   
  *
 124   
  * Iterators and Enumerations (i.e., those returned by
 125   
  * keySet().iterator(), entrySet().iterator(), values().iterator(),
 126   
  * keys(), and elements()) return elements reflecting the state of the
 127   
  * hash table at some point at or since the creation of the
 128   
  * iterator/enumeration.  They will return at most one instance of
 129   
  * each element (via next()/nextElement()), but might or might not
 130   
  * reflect puts and removes that have been processed since they were
 131   
  * created.  They do <em>not</em> throw ConcurrentModificationException.
 132   
  * However, these iterators are designed to be used by only one
 133   
  * thread at a time. Sharing an iterator across multiple threads may
 134   
  * lead to unpredictable results if the table is being concurrently
 135   
  * modified.  Again, you can ensure interference-free iteration by
 136   
  * enclosing the iteration in a synchronized block.  <p>
 137   
  *
 138   
  * This class may be used as a direct replacement for any use of
 139   
  * java.util.Hashtable that does not depend on readers being blocked
 140   
  * during updates. Like Hashtable but unlike java.util.HashMap,
 141   
  * this class does NOT allow <tt>null</tt> to be used as a key or
 142   
  * value.  This class is also typically faster than ConcurrentHashMap
 143   
  * when there is usually only one thread updating the table, but
 144   
  * possibly many retrieving values from it.
 145   
  * <p>
 146   
  *
 147   
  * Implementation note: A slightly faster implementation of
 148   
  * this class will be possible once planned Java Memory Model
 149   
  * revisions are in place.
 150   
  *
 151   
  * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
 152   
  **/
 153   
public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
    /**
     * The default initial number of table slots for this table (32).
     * Used when not otherwise specified in constructor.
     * NOTE(review): public, static and non-final, so any caller can mutate it
     * process-wide; consider making it final in a future release.
     **/
    public static int DEFAULT_INITIAL_CAPACITY = 32;

    /**
     * The minimum capacity.
     * Used if a lower value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two.
     */
    private static final int MINIMUM_CAPACITY = 4;

    /**
     * The maximum capacity.
     * Used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default load factor for this table.
     * Used when not otherwise specified in constructor, the default is 0.75f.
     **/
    public static final float DEFAULT_LOAD_FACTOR = 0.75f;

    //OpenSymphony BEGIN (pretty long!)
    // Marker stored as an entry's value when memory caching is disabled;
    // get() recognises it and falls through to the persistence layer.
    protected static final String NULL = "_nul!~";
    protected static Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);

    /*
      The basic strategy is an optimistic-style scheme based on
      the guarantee that the hash table and its lists are always
      kept in a consistent enough state to be read without locking:

      * Read operations first proceed without locking, by traversing the
         apparently correct list of the apparently correct bin. If an
         entry is found, but not invalidated (value field null), it is
         returned. If not found, operations must recheck (after a memory
         barrier) to make sure they are using both the right list and
         the right table (which can change under resizes). If
         invalidated, reads must acquire main update lock to wait out
         the update, and then re-traverse.

      * All list additions are at the front of each bin, making it easy
         to check changes, and also fast to traverse.  Entry next
         pointers are never assigned. Remove() builds new nodes when
         necessary to preserve this.

      * Remove() (also clear()) invalidates removed nodes to alert read
         operations that they must wait out the full modifications.

    */

    /**
     * Lock used only for its memory effects. We use a Boolean
     * because it is serializable, and we create a new one because
     * we need a unique object for each cache instance.
     * (new Boolean(true) is deliberate here: Boolean.TRUE would be shared
     * across instances and could not serve as a per-cache lock identity.)
     **/
    protected final Boolean barrierLock = new Boolean(true);

    /**
     * field written to only to guarantee lock ordering.
     **/
    protected transient Object lastWrite;

    /**
     * The hash table data. Length is always a power of two so that
     * (hash & (table.length - 1)) selects a bucket.
     */
    protected transient Entry[] table;

    /**
     * The total number of mappings in the hash table.
     */
    protected transient int count;

    /**
     * Persistence listener. When null, no disk/persistent store is used.
     */
    protected PersistenceListener persistenceListener = null;

    /**
     * Use memory cache or not. When false, entries hold the NULL marker
     * and the real values live only in the persistence layer.
     */
    protected boolean memoryCaching = true;

    /**
     * Use unlimited disk caching.
     */
    protected boolean unlimitedDiskCache = false;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    protected float loadFactor;

    /**
     * Default cache capacity (number of entries).
     */
    protected final int DEFAULT_MAX_ENTRIES = 100;

    /**
     * Max number of element in cache when considered unlimited.
     * (Integer.MAX_VALUE - 1.)
     */
    protected final int UNLIMITED = 2147483646;
    // Lazily created values() view; see also keySet/entrySet below.
    protected transient Collection values = null;

    /**
     * A HashMap containing the group information.
     * Each entry uses the group name as the key, and holds a
     * <code>Set</code> of containing keys of all
     * the cache entries that belong to that particular group.
     */
    protected HashMap groups = null;
    // Lazily created entrySet() view.
    protected transient Set entrySet = null;

    // Views
    // Lazily created keySet() view.
    protected transient Set keySet = null;

    /**
     * Cache capacity (number of entries).
     */
    protected int maxEntries = DEFAULT_MAX_ENTRIES;

    /**
     * The table is rehashed when its size exceeds this threshold.
     * (The value of this field is always (int)(capacity * loadFactor).)
     *
     * @serial
     */
    protected int threshold;

    /**
     * Use overflow persistence caching.
     */
    private boolean overflowPersistence = false;
 294   
 
 295   
     /**
 296   
      * Constructs a new, empty map with the specified initial capacity and the specified load factor.
 297   
      *
 298   
      * @param initialCapacity the initial capacity
 299   
      *  The actual initial capacity is rounded to the nearest power of two.
 300   
      * @param loadFactor  the load factor of the AbstractConcurrentReadCache
 301   
      * @throws IllegalArgumentException  if the initial maximum number
 302   
      *               of elements is less
 303   
      *               than zero, or if the load factor is nonpositive.
 304   
      */
 305  108
     public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
 306  108
         if (loadFactor <= 0) {
 307  0
             throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
 308   
         }
 309   
 
 310  108
         this.loadFactor = loadFactor;
 311   
 
 312  108
         int cap = p2capacity(initialCapacity);
 313  108
         table = new Entry[cap];
 314  108
         threshold = (int) (cap * loadFactor);
 315   
     }
 316   
 
 317   
     /**
 318   
      * Constructs a new, empty map with the specified initial capacity and default load factor.
 319   
      *
 320   
      * @param   initialCapacity   the initial capacity of the
 321   
      *                            AbstractConcurrentReadCache.
 322   
      * @throws    IllegalArgumentException if the initial maximum number
 323   
      *              of elements is less
 324   
      *              than zero.
 325   
      */
 326  0
     public AbstractConcurrentReadCache(int initialCapacity) {
 327  0
         this(initialCapacity, DEFAULT_LOAD_FACTOR);
 328   
     }
 329   
 
 330   
     /**
 331   
      * Constructs a new, empty map with a default initial capacity and load factor.
 332   
      */
 333  108
     public AbstractConcurrentReadCache() {
 334  108
         this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
 335   
     }
 336   
 
 337   
     /**
 338   
      * Constructs a new map with the same mappings as the given map.
 339   
      * The map is created with a capacity of twice the number of mappings in
 340   
      * the given map or 11 (whichever is greater), and a default load factor.
 341   
      */
 342  0
     public AbstractConcurrentReadCache(Map t) {
 343  0
         this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
 344  0
         putAll(t);
 345   
     }
 346   
 
 347   
     /**
 348   
      * Returns <tt>true</tt> if this map contains no key-value mappings.
 349   
      *
 350   
      * @return <tt>true</tt> if this map contains no key-value mappings.
 351   
      */
 352  0
     public synchronized boolean isEmpty() {
 353  0
         return count == 0;
 354   
     }
 355   
 
 356   
     /**
 357   
      * Returns a set of the cache keys that reside in a particular group.
 358   
      *
 359   
      * @param   groupName The name of the group to retrieve.
 360   
      * @return  a set containing all of the keys of cache entries that belong
 361   
      * to this group, or <code>null</code> if the group was not found.
 362   
      * @exception  NullPointerException if the groupName is <code>null</code>.
 363   
      */
 364  36
     public Set getGroup(String groupName) {
 365  36
         if (log.isDebugEnabled()) {
 366  0
             log.debug("getGroup called (group=" + groupName + ")");
 367   
         }
 368   
 
 369  36
         Set groupEntries = null;
 370   
 
 371  36
         if (memoryCaching && (groups != null)) {
 372  24
             groupEntries = (Set) getGroupsForReading().get(groupName);
 373   
         }
 374   
 
 375  36
         if (groupEntries == null) {
 376   
             // Not in the map, try the persistence layer
 377  12
             groupEntries = persistRetrieveGroup(groupName);
 378   
         } else {
 379   
             // We don't want to give them back the internal Set object
 380  24
             groupEntries = new HashSet(groupEntries);
 381   
         }
 382   
 
 383  36
         return groupEntries;
 384   
     }
 385   
 
 386   
     /**
 387   
      * Set the cache capacity
 388   
      */
 389  48
     public void setMaxEntries(int newLimit) {
 390  48
         if (newLimit > 0) {
 391  32
             maxEntries = newLimit;
 392   
 
 393  32
             synchronized (this) { // because remove() isn't synchronized
 394   
 
 395  32
                 while (size() > maxEntries) {
 396  16
                     remove(removeItem(), false);
 397   
                 }
 398   
             }
 399   
         } else {
 400   
             // Capacity must be at least 1
 401  16
             throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
 402   
         }
 403   
     }
 404   
 
 405   
     /**
 406   
      * Retrieve the cache capacity (number of entries).
 407   
      */
 408  32
     public int getMaxEntries() {
 409  32
         return maxEntries;
 410   
     }
 411   
 
 412   
     /**
 413   
      * Sets the memory caching flag.
 414   
      */
 415  108
     public void setMemoryCaching(boolean memoryCaching) {
 416  108
         this.memoryCaching = memoryCaching;
 417   
     }
 418   
 
 419   
     /**
 420   
      * Check if memory caching is used.
 421   
      */
 422  24
     public boolean isMemoryCaching() {
 423  24
         return memoryCaching;
 424   
     }
 425   
 
 426   
     /**
 427   
      * Set the persistence listener to use.
 428   
      */
 429  99
     public void setPersistenceListener(PersistenceListener listener) {
 430  99
         this.persistenceListener = listener;
 431   
     }
 432   
 
 433   
     /**
 434   
      * Get the persistence listener.
 435   
      */
 436  64
     public PersistenceListener getPersistenceListener() {
 437  64
         return persistenceListener;
 438   
     }
 439   
 
 440   
     /**
 441   
      * Sets the unlimited disk caching flag.
 442   
      */
 443  84
     public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
 444  84
         this.unlimitedDiskCache = unlimitedDiskCache;
 445   
     }
 446   
 
 447   
     /**
 448   
      * Check if we use unlimited disk cache.
 449   
      */
 450  0
     public boolean isUnlimitedDiskCache() {
 451  0
         return unlimitedDiskCache;
 452   
     }
 453   
 
 454   
     /**
 455   
      * Check if we use overflowPersistence
 456   
      *
 457   
      * @return Returns the overflowPersistence.
 458   
      */
 459  16
     public boolean isOverflowPersistence() {
 460  16
         return this.overflowPersistence;
 461   
     }
 462   
 
 463   
     /**
 464   
      * Sets the overflowPersistence flag
 465   
      *
 466   
      * @param overflowPersistence The overflowPersistence to set.
 467   
      */
 468  108
     public void setOverflowPersistence(boolean overflowPersistence) {
 469  108
         this.overflowPersistence = overflowPersistence;
 470   
     }
 471   
 
 472   
     /**
 473   
      * Return the number of slots in this table.
 474   
      **/
 475  0
     public synchronized int capacity() {
 476  0
         return table.length;
 477   
     }
 478   
 
 479   
    /**
     * Removes all mappings from this map.
     * Every live entry is first invalidated (value set to null) so that
     * concurrent lock-free readers detect the removal and retry under the
     * lock, then the buckets themselves are dropped and the persistent
     * store is emptied.
     */
    public synchronized void clear() {
        Entry[] tab = table;

        for (int i = 0; i < tab.length; ++i) {
            // must invalidate all to force concurrent get's to wait and then retry
            for (Entry e = tab[i]; e != null; e = e.next) {
                e.value = null;

                /** OpenSymphony BEGIN */
                // Notify the eviction algorithm that this key is gone.
                itemRemoved(e.key);

                /** OpenSymphony END */
            }

            // Drop the whole chain only after each node was invalidated above.
            tab[i] = null;
        }

        // Clean out the entire disk cache
        persistClear();

        count = 0;
        // Publish the modification through the barrier lock for readers.
        recordModification(tab);
    }
 505   
 
 506   
     /**
 507   
      * Returns a shallow copy of this.
 508   
      * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
 509   
      * values themselves are not cloned.
 510   
      *
 511   
      * @return a shallow copy of this map.
 512   
      */
 513  0
     public synchronized Object clone() {
 514  0
         try {
 515  0
             AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
 516  0
             t.keySet = null;
 517  0
             t.entrySet = null;
 518  0
             t.values = null;
 519   
 
 520  0
             Entry[] tab = table;
 521  0
             t.table = new Entry[tab.length];
 522   
 
 523  0
             Entry[] ttab = t.table;
 524   
 
 525  0
             for (int i = 0; i < tab.length; ++i) {
 526  0
                 Entry first = tab[i];
 527   
 
 528  0
                 if (first != null) {
 529  0
                     ttab[i] = (Entry) (first.clone());
 530   
                 }
 531   
             }
 532   
 
 533  0
             return t;
 534   
         } catch (CloneNotSupportedException e) {
 535   
             // this shouldn't happen, since we are Cloneable
 536  0
             throw new InternalError();
 537   
         }
 538   
     }
 539   
 
 540   
     /**
 541   
      * Tests if some key maps into the specified value in this table.
 542   
      * This operation is more expensive than the <code>containsKey</code>
 543   
      * method.<p>
 544   
      *
 545   
      * Note that this method is identical in functionality to containsValue,
 546   
      * (which is part of the Map interface in the collections framework).
 547   
      *
 548   
      * @param      value   a value to search for.
 549   
      * @return     <code>true</code> if and only if some key maps to the
 550   
      *             <code>value</code> argument in this table as
 551   
      *             determined by the <tt>equals</tt> method;
 552   
      *             <code>false</code> otherwise.
 553   
      * @exception  NullPointerException  if the value is <code>null</code>.
 554   
      * @see        #containsKey(Object)
 555   
      * @see        #containsValue(Object)
 556   
      * @see           Map
 557   
      */
 558  0
     public boolean contains(Object value) {
 559  0
         return containsValue(value);
 560   
     }
 561   
 
 562   
     /**
 563   
      * Tests if the specified object is a key in this table.
 564   
      *
 565   
      * @param   key   possible key.
 566   
      * @return  <code>true</code> if and only if the specified object
 567   
      *          is a key in this table, as determined by the
 568   
      *          <tt>equals</tt> method; <code>false</code> otherwise.
 569   
      * @exception  NullPointerException  if the key is
 570   
      *               <code>null</code>.
 571   
      * @see     #contains(Object)
 572   
      */
 573  24
     public boolean containsKey(Object key) {
 574  24
         return get(key) != null;
 575   
 
 576   
         /** OpenSymphony BEGIN */
 577   
 
 578   
         // TODO: Also check the persistence?
 579   
 
 580   
         /** OpenSymphony END */
 581   
     }
 582   
 
 583   
     /**
 584   
      * Returns <tt>true</tt> if this map maps one or more keys to the
 585   
      * specified value. Note: This method requires a full internal
 586   
      * traversal of the hash table, and so is much slower than
 587   
      * method <tt>containsKey</tt>.
 588   
      *
 589   
      * @param value value whose presence in this map is to be tested.
 590   
      * @return <tt>true</tt> if this map maps one or more keys to the
 591   
      * specified value.
 592   
      * @exception  NullPointerException  if the value is <code>null</code>.
 593   
      */
 594  0
     public boolean containsValue(Object value) {
 595  0
         if (value == null) {
 596  0
             throw new NullPointerException();
 597   
         }
 598   
 
 599  0
         Entry[] tab = getTableForReading();
 600   
 
 601  0
         for (int i = 0; i < tab.length; ++i) {
 602  0
             for (Entry e = tab[i]; e != null; e = e.next) {
 603  0
                 Object v = e.value;
 604   
 
 605  0
                 if ((v != null) && value.equals(v)) {
 606  0
                     return true;
 607   
                 }
 608   
             }
 609   
         }
 610   
 
 611  0
         return false;
 612   
     }
 613   
 
 614   
     /**
 615   
      * Returns an enumeration of the values in this table.
 616   
      * Use the Enumeration methods on the returned object to fetch the elements
 617   
      * sequentially.
 618   
      *
 619   
      * @return  an enumeration of the values in this table.
 620   
      * @see     java.util.Enumeration
 621   
      * @see     #keys()
 622   
      * @see        #values()
 623   
      * @see        Map
 624   
      */
 625  0
     public Enumeration elements() {
 626  0
         return new ValueIterator();
 627   
     }
 628   
 
 629   
    /**
     * Returns a collection view of the mappings contained in this map.
     * Each element in the returned collection is a <tt>Map.Entry</tt>.  The
     * collection is backed by the map, so changes to the map are reflected in
     * the collection, and vice-versa.  The collection supports element
     * removal, which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * NOTE(review): the lazy initialization below is unsynchronized; two
     * racing threads may each build a view instance, which appears benign
     * since both delegate to the same map -- confirm this is intended.
     *
     * @return a collection view of the mappings contained in this map.
     */
    public Set entrySet() {
        Set es = entrySet;

        if (es != null) {
            return es;
        } else {
            // Build and cache the view on first use.
            return entrySet = new AbstractSet() {
                        public Iterator iterator() {
                            return new HashIterator();
                        }

                        public boolean contains(Object o) {
                            if (!(o instanceof Map.Entry)) {
                                return false;
                            }

                            // Membership means the map currently holds an
                            // equal value for the entry's key.
                            Map.Entry entry = (Map.Entry) o;
                            Object key = entry.getKey();
                            Object v = AbstractConcurrentReadCache.this.get(key);

                            return (v != null) && v.equals(entry.getValue());
                        }

                        public boolean remove(Object o) {
                            if (!(o instanceof Map.Entry)) {
                                return false;
                            }

                            return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
                        }

                        public int size() {
                            return AbstractConcurrentReadCache.this.size();
                        }

                        public void clear() {
                            AbstractConcurrentReadCache.this.clear();
                        }
                    };
        }
    }
 682   
 
 683   
     /**
 684   
      * Returns the value to which the specified key is mapped in this table.
 685   
      *
 686   
      * @param   key   a key in the table.
 687   
      * @return  the value to which the key is mapped in this table;
 688   
      *          <code>null</code> if the key is not mapped to any value in
 689   
      *          this table.
 690   
      * @exception  NullPointerException  if the key is
 691   
      *               <code>null</code>.
 692   
      * @see     #put(Object, Object)
 693   
      */
 694  859
    public Object get(Object key) {
        if (log.isDebugEnabled()) {
            log.debug("get called (key=" + key + ")");
        }

        // throw null pointer exception if key null
        int hash = hash(key);

        /*
           Start off at the apparently correct bin.  If entry is found, we
           need to check after a barrier anyway.  If not found, we need a
           barrier to check if we are actually in right bin. So either
           way, we encounter only one barrier unless we need to retry.
           And we only need to fully synchronize if there have been
           concurrent modifications.
        */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // If key apparently not there, check to
                // make sure this was a valid read
                tab = getTableForReading();

                if (first == tab[index]) {
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return null;*/

                    // Not in the table, try persistence
                    Object value = persistRetrieve(key);

                    if (value != null) {
                        // Found on disk: update the in-memory map, but don't
                        // write the data back out to persistence again
                        put(key, value, false);
                    }

                    return value;

                    /** OpenSymphony END */
                } else {
                    // Wrong list -- must restart traversal at new first
                    e = first = tab[index = hash & (tab.length - 1)];
                }
            }
            // checking for pointer equality first wins in most applications
            else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object value = e.value;

                if (value != null) {
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return value;*/
                    if (NULL.equals(value)) {
                        // Memory caching is disabled for this entry (the NULL
                        // sentinel is stored in its place), so go to disk
                        value = persistRetrieve(e.key);

                        if (value != null) {
                            // Notify the eviction algorithm of the hit
                            itemRetrieved(key);
                        }

                        return value; // fix [CACHE-13]
                    } else {
                        itemRetrieved(key);

                        return value;
                    }

                    /** OpenSymphony END */
                }

                // Entry was invalidated during deletion. But it could
                // have been re-inserted, so we must retraverse.
                // To avoid useless contention, get lock to wait out modifications
                // before retraversing.
                synchronized (this) {
                    tab = table;
                }

                e = first = tab[index = hash & (tab.length - 1)];
            } else {
                e = e.next;
            }
        }
    }
 784   
 
 785   
     /**
 786   
      * Returns a set view of the keys contained in this map.
 787   
      * The set is backed by the map, so changes to the map are reflected in the set, and
 788   
      * vice-versa.  The set supports element removal, which removes the
 789   
      * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
 790   
      * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
 791   
      * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
 792   
      * <tt>addAll</tt> operations.
 793   
      *
 794   
      * @return a set view of the keys contained in this map.
 795   
      */
 796  24
     public Set keySet() {
 797  24
         Set ks = keySet;
 798   
 
 799  24
         if (ks != null) {
 800  8
             return ks;
 801   
         } else {
 802  16
             return keySet = new AbstractSet() {
 803  24
                         public Iterator iterator() {
 804  24
                             return new KeyIterator();
 805   
                         }
 806   
 
 807  0
                         public int size() {
 808  0
                             return AbstractConcurrentReadCache.this.size();
 809   
                         }
 810   
 
 811  0
                         public boolean contains(Object o) {
 812  0
                             return AbstractConcurrentReadCache.this.containsKey(o);
 813   
                         }
 814   
 
 815  0
                         public boolean remove(Object o) {
 816  0
                             return AbstractConcurrentReadCache.this.remove(o) != null;
 817   
                         }
 818   
 
 819  0
                         public void clear() {
 820  0
                             AbstractConcurrentReadCache.this.clear();
 821   
                         }
 822   
                     };
 823   
         }
 824   
     }
 825   
 
 826   
    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return  an enumeration of the keys in this table.
     * @see     Enumeration
     * @see     #elements()
     * @see        #keySet()
     * @see        Map
     */
    public Enumeration keys() {
        // KeyIterator implements both Iterator and Enumeration
        return new KeyIterator();
    }
 838   
 
 839   
    /**
     * Return the load factor used by this cache.
     *
     * @return the load factor (the ratio of entries to capacity at which
     *         the table is resized).
     **/
    public float loadFactor() {
        return loadFactor;
    }
 845   
 
 846   
    /**
     * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
     * Neither the key nor the
     * value can be <code>null</code>. <p>
     *
     * The value can be retrieved by calling the <code>get</code> method
     * with a key that is equal to the original key.
     *
     * @param      key     the table key.
     * @param      value   the value.
     * @return     the previous value of the specified key in this table,
     *             or <code>null</code> if it did not have one.
     * @exception  NullPointerException  if the key or value is
     *               <code>null</code>.
     * @see     Object#equals(Object)
     * @see     #get(Object)
     */
    /** OpenSymphony BEGIN */
    public Object put(Object key, Object value) {
        // Call the internal put with persistence enabled
        return put(key, value, true);
    }
 868   
 
 869   
     /**
 870   
      * Copies all of the mappings from the specified map to this one.
 871   
      *
 872   
      * These mappings replace any mappings that this map had for any of the
 873   
      * keys currently in the specified Map.
 874   
      *
 875   
      * @param t Mappings to be stored in this map.
 876   
      */
 877  0
     public synchronized void putAll(Map t) {
 878  0
         for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
 879  0
             Map.Entry entry = (Map.Entry) it.next();
 880  0
             Object key = entry.getKey();
 881  0
             Object value = entry.getValue();
 882  0
             put(key, value);
 883   
         }
 884   
     }
 885   
 
 886   
    /**
     * Removes the key (and its corresponding value) from this table.
     * This method does nothing if the key is not in the table.
     *
     * @param   key   the key that needs to be removed.
     * @return  the value to which the key had been mapped in this table,
     *          or <code>null</code> if the key did not have a mapping.
     * @exception  NullPointerException  if the key is
     *               <code>null</code>.
     */
    /** OpenSymphony BEGIN */
    public Object remove(Object key) {
        // Delegate to the internal remove, invoking the eviction algorithm
        return remove(key, true);
    }
 900   
 
 901   
    /**
     * Returns the total number of cache entries held in this map.
     *
     * @return the number of key-value mappings in this map.
     */
    public synchronized int size() {
        // synchronized so the count is read under the main lock
        return count;
    }
 909   
 
 910   
     /**
 911   
      * Returns a collection view of the values contained in this map.
 912   
      * The collection is backed by the map, so changes to the map are reflected in
 913   
      * the collection, and vice-versa.  The collection supports element
 914   
      * removal, which removes the corresponding mapping from this map, via the
 915   
      * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
 916   
      * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
 917   
      * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
 918   
      *
 919   
      * @return a collection view of the values contained in this map.
 920   
      */
 921  0
     public Collection values() {
 922  0
         Collection vs = values;
 923   
 
 924  0
         if (vs != null) {
 925  0
             return vs;
 926   
         } else {
 927  0
             return values = new AbstractCollection() {
 928  0
                         public Iterator iterator() {
 929  0
                             return new ValueIterator();
 930   
                         }
 931   
 
 932  0
                         public int size() {
 933  0
                             return AbstractConcurrentReadCache.this.size();
 934   
                         }
 935   
 
 936  0
                         public boolean contains(Object o) {
 937  0
                             return AbstractConcurrentReadCache.this.containsValue(o);
 938   
                         }
 939   
 
 940  0
                         public void clear() {
 941  0
                             AbstractConcurrentReadCache.this.clear();
 942   
                         }
 943   
                     };
 944   
         }
 945   
     }
 946   
 
 947   
    /**
     * Get ref to groups.
     * The reference and the cells it
     * accesses will be at least as fresh as from last
     * use of barrierLock
     **/
    protected final Map getGroupsForReading() {
        // Empty synchronized block acts as a memory barrier
        synchronized (barrierLock) {
            return groups;
        }
    }
 958   
 
 959   
    /**
     * Get ref to table; the reference and the cells it
     * accesses will be at least as fresh as from last
     * use of barrierLock
     **/
    protected final Entry[] getTableForReading() {
        // Empty synchronized block acts as a memory barrier
        synchronized (barrierLock) {
            return table;
        }
    }
 969   
 
 970   
    /**
     * Force a memory synchronization that will cause
     * all readers to see table. Call only when already
     * holding main synch lock.
     *
     * @param x the last object written (kept alive in lastWrite so the
     *          write is published through the barrierLock synchronization)
     **/
    protected final void recordModification(Object x) {
        synchronized (barrierLock) {
            lastWrite = x;
        }
    }
 980   
 
 981   
     /**
 982   
      * Helper method for entrySet remove.
 983   
      **/
 984  0
     protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
 985  0
         Object key = entry.getKey();
 986  0
         Object v = get(key);
 987   
 
 988  0
         if ((v != null) && v.equals(entry.getValue())) {
 989  0
             remove(key);
 990   
 
 991  0
             return true;
 992   
         } else {
 993  0
             return false;
 994   
         }
 995   
     }
 996   
 
 997   
     /**
 998   
      * Remove an object from the persistence.
 999   
      * @param key The key of the object to remove
 1000   
      */
 1001  45
     protected void persistRemove(Object key) {
 1002  45
         if (log.isDebugEnabled()) {
 1003  0
             log.debug("PersistRemove called (key=" + key + ")");
 1004   
         }
 1005   
 
 1006  45
         if (persistenceListener != null) {
 1007  21
             try {
 1008  21
                 persistenceListener.remove((String) key);
 1009   
             } catch (CachePersistenceException e) {
 1010  0
                 log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
 1011   
             }
 1012   
         }
 1013   
     }
 1014   
 
 1015   
     /**
 1016   
      * Removes a cache group using the persistence listener.
 1017   
      * @param groupName The name of the group to remove
 1018   
      */
 1019  0
     protected void persistRemoveGroup(String groupName) {
 1020  0
         if (log.isDebugEnabled()) {
 1021  0
             log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
 1022   
         }
 1023   
 
 1024  0
         if (persistenceListener != null) {
 1025  0
             try {
 1026  0
                 persistenceListener.removeGroup(groupName);
 1027   
             } catch (CachePersistenceException e) {
 1028  0
                 log.error("[oscache] Exception removing group " + groupName, e);
 1029   
             }
 1030   
         }
 1031   
     }
 1032   
 
 1033   
     /**
 1034   
      * Retrieve an object from the persistence listener.
 1035   
      * @param key The key of the object to retrieve
 1036   
      */
 1037  446
     protected Object persistRetrieve(Object key) {
 1038  446
         if (log.isDebugEnabled()) {
 1039  0
             log.debug("persistRetrieve called (key=" + key + ")");
 1040   
         }
 1041   
 
 1042  446
         Object entry = null;
 1043   
 
 1044  446
         if (persistenceListener != null) {
 1045  372
             try {
 1046  372
                 entry = persistenceListener.retrieve((String) key);
 1047   
             } catch (CachePersistenceException e) {
 1048   
                 /**
 1049   
                  * It is normal that we get an exception occasionally.
 1050   
                  * It happens when the item is invalidated (written or removed)
 1051   
                  * during read. The logic is constructed so that read is retried.
 1052   
                  */
 1053   
             }
 1054   
         }
 1055   
 
 1056  446
         return entry;
 1057   
     }
 1058   
 
 1059   
     /**
 1060   
      * Retrieves a cache group using the persistence listener.
 1061   
      * @param groupName The name of the group to retrieve
 1062   
      */
 1063  148
     protected Set persistRetrieveGroup(String groupName) {
 1064  148
         if (log.isDebugEnabled()) {
 1065  0
             log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
 1066   
         }
 1067   
 
 1068  148
         if (persistenceListener != null) {
 1069  109
             try {
 1070  109
                 return persistenceListener.retrieveGroup(groupName);
 1071   
             } catch (CachePersistenceException e) {
 1072  0
                 log.error("[oscache] Exception retrieving group " + groupName, e);
 1073   
             }
 1074   
         }
 1075   
 
 1076  39
         return null;
 1077   
     }
 1078   
 
 1079   
     /**
 1080   
      * Store an object in the cache using the persistence listener.
 1081   
      * @param key The object key
 1082   
      * @param obj The object to store
 1083   
      */
 1084  440
     protected void persistStore(Object key, Object obj) {
 1085  440
         if (log.isDebugEnabled()) {
 1086  0
             log.debug("persistStore called (key=" + key + ")");
 1087   
         }
 1088   
 
 1089  440
         if (persistenceListener != null) {
 1090  182
             try {
 1091  182
                 persistenceListener.store((String) key, obj);
 1092   
             } catch (CachePersistenceException e) {
 1093  0
                 log.error("[oscache] Exception persisting " + key, e);
 1094   
             }
 1095   
         }
 1096   
     }
 1097   
 
 1098   
     /**
 1099   
      * Creates or Updates a cache group using the persistence listener.
 1100   
      * @param groupName The name of the group to update
 1101   
      * @param group The entries for the group
 1102   
      */
 1103  130
     protected void persistStoreGroup(String groupName, Set group) {
 1104  130
         if (log.isDebugEnabled()) {
 1105  0
             log.debug("persistStoreGroup called (groupName=" + groupName + ")");
 1106   
         }
 1107   
 
 1108  130
         if (persistenceListener != null) {
 1109  98
             try {
 1110  98
                 if ((group == null) || group.isEmpty()) {
 1111  0
                     persistenceListener.removeGroup(groupName);
 1112   
                 } else {
 1113  98
                     persistenceListener.storeGroup(groupName, group);
 1114   
                 }
 1115   
             } catch (CachePersistenceException e) {
 1116  0
                 log.error("[oscache] Exception persisting group " + groupName, e);
 1117   
             }
 1118   
         }
 1119   
     }
 1120   
 
 1121   
     /**
 1122   
      * Removes the entire cache from persistent storage.
 1123   
      */
 1124  188
     protected void persistClear() {
 1125  188
         if (log.isDebugEnabled()) {
 1126  0
             log.debug("persistClear called");
 1127   
             ;
 1128   
         }
 1129   
 
 1130  188
         if (persistenceListener != null) {
 1131  73
             try {
 1132  73
                 persistenceListener.clear();
 1133   
             } catch (CachePersistenceException e) {
 1134  0
                 log.error("[oscache] Exception clearing persistent cache", e);
 1135   
             }
 1136   
         }
 1137   
     }
 1138   
 
 1139   
    /**
     * Notify the underlying implementation that an item was put in the cache.
     *
     * @param key The cache key of the item that was put.
     */
    protected abstract void itemPut(Object key);
 1145   
 
 1146   
    /**
     * Notify any underlying algorithm that an item has been retrieved from the cache.
     *
     * @param key The cache key of the item that was retrieved.
     */
    protected abstract void itemRetrieved(Object key);
 1152   
 
 1153   
    /**
     * Notify the underlying implementation that an item was removed from the cache.
     *
     * @param key The cache key of the item that was removed.
     */
    protected abstract void itemRemoved(Object key);
 1159   
 
 1160   
    /**
     * The cache has reached its capacity and an item needs to be removed
     * (typically according to an algorithm such as LRU or FIFO).
     *
     * @return The key of whichever item was removed.
     */
    protected abstract Object removeItem();
 1167   
 
 1168   
    /**
     * Reconstitute the <tt>AbstractConcurrentReadCache</tt>.
     * instance from a stream (i.e.,
     * deserialize it).
     * The read order must mirror {@link #writeObject}: default fields,
     * then bucket count, then size, then alternating key/value pairs.
     */
    private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
        // Read in the threshold, loadfactor, and any hidden stuff
        s.defaultReadObject();

        // Read in number of buckets and allocate the bucket array;
        int numBuckets = s.readInt();
        table = new Entry[numBuckets];

        // Read in size (number of Mappings)
        int size = s.readInt();

        // Read the keys and values, and put the mappings in the table
        for (int i = 0; i < size; i++) {
            Object key = s.readObject();
            Object value = s.readObject();
            put(key, value);
        }
    }
 1191   
 
 1192   
    /**
     * Rehashes the contents of this map into a new table with a larger capacity.
     * This method is called automatically when the
     * number of keys in this map exceeds its capacity and load factor.
     * Must be called while holding the main synch lock.
     */
    protected void rehash() {
        Entry[] oldMap = table;
        int oldCapacity = oldMap.length;

        if (oldCapacity >= MAXIMUM_CAPACITY) {
            // Cannot grow any further; keep the existing table
            return;
        }

        int newCapacity = oldCapacity << 1;
        Entry[] newMap = new Entry[newCapacity];
        threshold = (int) (newCapacity * loadFactor);

        /*
          We need to guarantee that any existing reads of oldMap can
          proceed. So we cannot yet null out each oldMap bin.

          Because we are using power-of-two expansion, the elements
          from each bin must either stay at same index, or move
          to oldCapacity+index. We also minimize new node creation by
          catching cases where old nodes can be reused because their
          .next fields won't change. (This is checked only for sequences
          of one and two. It is not worth checking longer ones.)
        */
        for (int i = 0; i < oldCapacity; ++i) {
            // l = head of the chain that stays at newMap[i]
            Entry l = null;
            // h = head of the chain that moves to newMap[oldCapacity + i]
            Entry h = null;
            Entry e = oldMap[i];

            while (e != null) {
                int hash = e.hash;
                Entry next = e.next;

                if ((hash & oldCapacity) == 0) {
                    // stays at newMap[i]
                    if (l == null) {
                        // try to reuse node: safe when the remainder of the
                        // chain (at most one more node) lands in the same bin,
                        // so e's .next field needs no change
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
                            l = e;

                            // reusing the tail as-is; nothing left to copy
                            break;
                        }
                    }

                    l = new Entry(hash, e.key, e.value, l);
                } else {
                    // moves to newMap[oldCapacity+i]
                    if (h == null) {
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
                            h = e;

                            break;
                        }
                    }

                    h = new Entry(hash, e.key, e.value, h);
                }

                e = next;
            }

            newMap[i] = l;
            newMap[oldCapacity + i] = h;
        }

        table = newMap;
        // Publish the new table to readers via the barrier lock
        recordModification(newMap);
    }
 1264   
 
 1265   
    /**
     * Continuation of put(), called only when synch lock is
     * held and interference has been detected.
     *
     * @param key     the key to store.
     * @param value   the value to store.
     * @param hash    precomputed hash of the key.
     * @param persist whether to write the entry through to the
     *                persistence layer.
     * @return the previous value for the key, or <code>null</code> if none.
     **/
    /** OpenSymphony BEGIN */

    /* Previous code
    protected Object sput(Object key, Object value, int hash) {*/
    protected Object sput(Object key, Object value, int hash, boolean persist) {
        /** OpenSymphony END */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                /** OpenSymphony BEGIN */

                // Previous code
                //          Entry newEntry = new Entry(hash, key, value, first);
                Entry newEntry;

                if (memoryCaching) {
                    newEntry = new Entry(hash, key, value, first);
                } else {
                    // Memory caching disabled: store the NULL sentinel so
                    // lookups know to fall back to the persistence layer
                    newEntry = new Entry(hash, key, NULL, first);
                }

                itemPut(key);

                // Persist if required
                if (persist && !overflowPersistence) {
                    persistStore(key, value);
                }

                // If we have a CacheEntry, update the group lookups
                if (value instanceof CacheEntry) {
                    updateGroups(null, (CacheEntry) value, persist);
                }

                /**        OpenSymphony END */
                // Link the new entry in last, after all side effects are done
                tab[index] = newEntry;

                if (++count >= threshold) {
                    rehash();
                } else {
                    // rehash() records its own modification; otherwise publish here
                    recordModification(newEntry);
                }

                return null;
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object oldValue = e.value;

                /** OpenSymphony BEGIN */

                /* Previous code
                e.value = value; */
                if (memoryCaching) {
                    e.value = value;
                }

                // Persist if required
                if (persist && overflowPersistence) {
                    // Overflow-only mode: entry is back in memory, so drop disk copy
                    persistRemove(key);
                } else if (persist) {
                    persistStore(key, value);
                }

                updateGroups(oldValue, value, persist);

                itemPut(key);

                /** OpenSymphony END */
                return oldValue;
            } else {
                e = e.next;
            }
        }
    }
 1345   
 
 1346   
     /**
 1347   
      * Continuation of remove(), called only when synch lock is
 1348   
      * held and interference has been detected.
 1349   
      **/
 1350   
     /** OpenSymphony BEGIN */
 1351   
 
 1352   
     /* Previous code
 1353   
     protected Object sremove(Object key, int hash) { */
 1354  0
     protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
 1355   
         /** OpenSymphony END */
 1356  0
         Entry[] tab = table;
 1357  0
         int index = hash & (tab.length - 1);
 1358  0
         Entry first = tab[index];
 1359  0
         Entry e = first;
 1360   
 
 1361  0
         for (;;) {
 1362  0
             if (e == null) {
 1363  0
                 return null;
 1364  0
             } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 1365  0
                 Object oldValue = e.value;
 1366  0
                 e.value = null;
 1367  0
                 count--;
 1368   
 
 1369   
                 /** OpenSymphony BEGIN */
 1370  0
                 if (!unlimitedDiskCache && !overflowPersistence) {
 1371  0
                     persistRemove(e.key);
 1372   
                 }
 1373   
 
 1374  0
                 if (overflowPersistence && ((size() + 1) >= maxEntries)) {
 1375  0
                     persistStore(key, oldValue);
 1376   
                 }
 1377   
 
 1378  0
                 if (invokeAlgorithm) {
 1379  0
                     itemRemoved(key);
 1380   
                 }
 1381   
 
 1382   
                 /** OpenSymphony END */
 1383  0
                 Entry head = e.next;
 1384   
 
 1385  0
                 for (Entry p = first; p != e; p = p.next) {
 1386  0
                     head = new Entry(p.hash, p.key, p.value, head);
 1387   
                 }
 1388   
 
 1389  0
                 tab[index] = head;
 1390  0
                 recordModification(head);
 1391   
 
 1392  0
                 return oldValue;
 1393   
             } else {
 1394  0
                 e = e.next;
 1395   
             }
 1396   
         }
 1397   
     }
 1398   
 
 1399   
     /**
 1400   
      * Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream.
 1401   
      * (i.e., serialize it).
 1402   
      *
 1403   
      * @serialData The <i>capacity</i> of the
 1404   
      * AbstractConcurrentReadCache (the length of the
 1405   
      * bucket array) is emitted (int), followed  by the
 1406   
      * <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
 1407   
      * mappings), followed by the key (Object) and value (Object)
 1408   
      * for each key-value mapping represented by the AbstractConcurrentReadCache
 1409   
      * The key-value mappings are emitted in no particular order.
 1410   
      */
 1411  0
     private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
 1412   
         // Write out the threshold, loadfactor, and any hidden stuff
 1413  0
         s.defaultWriteObject();
 1414   
 
 1415   
         // Write out number of buckets
 1416  0
         s.writeInt(table.length);
 1417   
 
 1418   
         // Write out size (number of Mappings)
 1419  0
         s.writeInt(count);
 1420   
 
 1421   
         // Write out keys and values (alternating)
 1422  0
         for (int index = table.length - 1; index >= 0; index--) {
 1423  0
             Entry entry = table[index];
 1424   
 
 1425  0
             while (entry != null) {
 1426  0
                 s.writeObject(entry.key);
 1427  0
                 s.writeObject(entry.value);
 1428  0
                 entry = entry.next;
 1429   
             }
 1430   
         }
 1431   
     }
 1432   
 
 1433   
     /**
 1434   
      * Return hash code for Object x.
 1435   
      * Since we are using power-of-two
 1436   
      * tables, it is worth the effort to improve hashcode via
 1437   
      * the same multiplicative scheme as used in IdentityHashMap.
 1438   
      */
 1439  1541
     private static int hash(Object x) {
 1440  1541
         int h = x.hashCode();
 1441   
 
 1442   
         // Multiply by 127 (quickly, via shifts), and mix in some high
 1443   
         // bits to help guard against bunching of codes that are
 1444   
         // consecutive or equally spaced.
 1445  1517
         return ((h << 7) - h + (h >>> 9) + (h >>> 17));
 1446   
     }
 1447   
 
 1448   
    /**
     * Add this cache key to the specified groups.
     * We have to treat the
     * memory and disk group mappings separately so they remain valid for their
     * corresponding memory/disk caches. (eg if mem is limited to 100 entries
     * and disk is unlimited, the group mappings will be different).
     *
     * @param key The cache key that we are adding to the groups.
     * @param newGroups the set of groups we want to add this cache entry to.
     * @param persist A flag to indicate whether the keys should be added to
     * the persistent cache layer.
     */
    private void addGroupMappings(String key, Set newGroups, boolean persist) {
        // Add this CacheEntry to the groups that it is now a member of
        for (Iterator it = newGroups.iterator(); it.hasNext();) {
            String groupName = (String) it.next();

            // Update the in-memory groups
            if (memoryCaching) {
                if (groups == null) {
                    // Lazily create the group map the first time it is needed
                    groups = new HashMap();
                }

                Set memoryGroup = (Set) groups.get(groupName);

                if (memoryGroup == null) {
                    memoryGroup = new HashSet();
                    groups.put(groupName, memoryGroup);
                }

                memoryGroup.add(key);
            }

            // Update the persistent group maps
            if (persist) {
                Set persistentGroup = persistRetrieveGroup(groupName);

                if (persistentGroup == null) {
                    persistentGroup = new HashSet();
                }

                persistentGroup.add(key);
                // Write the updated group membership back to the persistent store
                persistStoreGroup(groupName, persistentGroup);
            }
        }
    }
 1494   
 
 1495   
     /** OpenSymphony END (pretty long!) */
 1496   
     /**
 1497   
      * Returns the appropriate capacity (power of two) for the specified
 1498   
      * initial capacity argument.
 1499   
      */
 1500  108
     private int p2capacity(int initialCapacity) {
 1501  108
         int cap = initialCapacity;
 1502   
 
 1503   
         // Compute the appropriate capacity
 1504  108
         int result;
 1505   
 
 1506  108
         if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
 1507  0
             result = MAXIMUM_CAPACITY;
 1508   
         } else {
 1509  108
             result = MINIMUM_CAPACITY;
 1510   
 
 1511  108
             while (result < cap) {
 1512  324
                 result <<= 1;
 1513   
             }
 1514   
         }
 1515   
 
 1516  108
         return result;
 1517   
     }
 1518   
 
 1519   
    /* Previous code
    public Object put(Object key, Object value)*/

    /**
     * Inserts or replaces the mapping for the given key. This is the
     * OpenSymphony-modified version of Doug Lea's put: in addition to the
     * original lock-free read / synchronized write protocol it enforces the
     * <tt>maxEntries</tt> bound, notifies the eviction algorithm via
     * <tt>itemPut</tt>, maintains group mappings for CacheEntry values, and
     * optionally writes the value through to the persistent store.
     *
     * @param key the cache key.
     * @param value the value to cache; must not be <code>null</code>.
     * @param persist whether the value should also be stored in the
     * persistent cache layer.
     * @return the newly created Entry when a fresh mapping was inserted, or
     * the previous value when an existing mapping was replaced.
     * @throws NullPointerException if <code>value</code> is <code>null</code>.
     */
    private Object put(Object key, Object value, boolean persist) {
        /** OpenSymphony END */
        if (value == null) {
            throw new NullPointerException();
        }

        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // Key not found in the list we traversed without a lock --
                // take the lock and insert (re-checking for a concurrent change).
                synchronized (this) {
                    tab = table;

                    /** OpenSymphony BEGIN */

                    // Previous code

                    /*                                        if (first == tab[index]) {
                                                                    //  Add to front of list
                                                                    Entry newEntry = new Entry(hash, key, value, first);
                                                                    tab[index] = newEntry;
                                                                    if (++count >= threshold) rehash();
                                                                    else recordModification(newEntry);
                                                                    return null; */

                    // Remove an item if the cache is full
                    if (size() >= maxEntries) {
                        remove(removeItem(), false);
                    }

                    if (first == tab[index]) {
                        //  Add to front of list
                        Entry newEntry = null;

                        // When memory caching is disabled the entry carries the
                        // NULL marker object instead of the real value.
                        if (memoryCaching) {
                            newEntry = new Entry(hash, key, value, first);
                        } else {
                            newEntry = new Entry(hash, key, NULL, first);
                        }

                        tab[index] = newEntry;
                        itemPut(key);

                        // Persist if required (overflow-only persistence defers
                        // the disk write until the entry is evicted from memory)
                        if (persist && !overflowPersistence) {
                            persistStore(key, value);
                        }

                        // If we have a CacheEntry, update the group lookups
                        if (value instanceof CacheEntry) {
                            updateGroups(null, (CacheEntry) value, persist);
                        }

                        if (++count >= threshold) {
                            rehash();
                        } else {
                            recordModification(newEntry);
                        }

                        return newEntry;

                        /** OpenSymphony END  */
                    } else {
                        // wrong list -- retry via the slow synchronized path

                        /** OpenSymphony BEGIN */

                        /* Previous code
                        return sput(key, value, hash);*/
                        return sput(key, value, hash, persist);

                        /** OpenSymphony END */
                    }
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                // synch to avoid race with remove and to
                // ensure proper serialization of multiple replaces
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    // [CACHE-118] - get the old cache entry even if there's no memory cache
                    if (persist && (oldValue == NULL)) {
                        oldValue = persistRetrieve(key);
                    }

                    if ((first == tab[index]) && (oldValue != null)) {
                        /** OpenSymphony BEGIN */

                        /* Previous code
                        e.value = value;
                        return oldValue; */
                        if (memoryCaching) {
                            e.value = value;
                        }

                        // Persist if required
                        if (persist && overflowPersistence) {
                            // Overflow-only persistence: the entry now lives in
                            // memory again, so the overflowed disk copy is stale.
                            persistRemove(key);
                        } else if (persist) {
                            persistStore(key, value);
                        }

                        updateGroups(oldValue, value, persist);
                        itemPut(key);

                        return oldValue;

                        /**        OpenSymphony END */
                    } else {
                        // retry if wrong list or lost race against concurrent remove

                        /** OpenSymphony BEGIN */

                        /* Previous code
                        return sput(key, value, hash);*/
                        return sput(key, value, hash, persist);

                        /** OpenSymphony END */
                    }
                }
            } else {
                e = e.next;
            }
        }
    }
 1652   
 
 1653  64
    /**
     * Removes the mapping for the given key.
     *
     * @param key the cache key; <code>null</code> is tolerated and simply
     * returns <code>null</code>.
     * @param invokeAlgorithm whether to notify the eviction algorithm via
     * <tt>itemRemoved</tt> (<code>false</code> when the removal was itself
     * triggered by the algorithm, to avoid re-entrant notification).
     * @return the removed value, or <code>null</code> if absent.
     */
    private Object remove(Object key, boolean invokeAlgorithm)
    /* Previous code
    public Object remove(Object key) */

    /** OpenSymphony END */  {
        /*
          Strategy:

          Find the entry, then
            1. Set value field to null, to force get() to retry
            2. Rebuild the list without this entry.
               All entries following removed node can stay in list, but
               all preceding ones need to be cloned.  Traversals rely
               on this strategy to ensure that elements will not be
              repeated during iteration.
        */

        /** OpenSymphony BEGIN */
        if (key == null) {
            return null;
        }

        /** OpenSymphony END */
        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                tab = getTableForReading();

                if (first == tab[index]) {
                    // Key genuinely absent -- nothing to remove.
                    return null;
                } else {
                    // Wrong list -- must restart traversal at new first

                    /** OpenSymphony BEGIN */

                    /* Previous Code
                    return sremove(key, hash); */
                    return sremove(key, hash, invokeAlgorithm);

                    /** OpenSymphony END */
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    // re-find under synch if wrong list
                    if ((first != tab[index]) || (oldValue == null)) {
                        /** OpenSymphony BEGIN */

                        /* Previous Code
                        return sremove(key, hash); */
                        return sremove(key, hash, invokeAlgorithm);
                    }

                    /** OpenSymphony END */
                    // Null the value first so concurrent get() calls retry.
                    e.value = null;
                    count--;

                    /** OpenSymphony BEGIN */
                    // Remove the disk copy unless the disk cache keeps
                    // everything (unlimited) or is overflow-only.
                    if (!unlimitedDiskCache && !overflowPersistence) {
                        persistRemove(e.key);
                    }

                    // NOTE(review): with overflow-only persistence a value
                    // removed from a (nearly) full memory cache is spilled to
                    // disk -- presumably this remove() is making room for a new
                    // entry; confirm the (size() + 1) >= maxEntries condition
                    // against the eviction callers.
                    if (overflowPersistence && ((size() + 1) >= maxEntries)) {
                        persistStore(key, oldValue);
                    }

                    if (invokeAlgorithm) {
                        itemRemoved(key);
                    }

                    /** OpenSymphony END */
                    // Rebuild the bucket list without e: nodes after e are
                    // shared as-is, nodes before e are cloned (see Strategy).
                    Entry head = e.next;

                    for (Entry p = first; p != e; p = p.next) {
                        head = new Entry(p.hash, p.key, p.value, head);
                    }

                    tab[index] = head;
                    recordModification(head);

                    return oldValue;
                }
            } else {
                e = e.next;
            }
        }
    }
 1748   
 
 1749   
     /**
 1750   
      * Remove this CacheEntry from the groups it no longer belongs to.
 1751   
      *  We have to treat the memory and disk group mappings seperately so they remain
 1752   
      * valid for their corresponding memory/disk caches. (eg if mem is limited
 1753   
      * to 100 entries and disk is unlimited, the group mappings will be
 1754   
      * different).
 1755   
      *
 1756   
      * @param key The cache key that we are removing from the groups.
 1757   
      * @param oldGroups the set of groups we want to remove the cache entry
 1758   
      * from.
 1759   
      * @param persist A flag to indicate whether the keys should be removed
 1760   
      * from the persistent cache layer.
 1761   
      */
 1762  80
     private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
 1763  80
         for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1764  24
             String groupName = (String) it.next();
 1765   
 
 1766   
             // Update the in-memory groups
 1767  24
             if (memoryCaching && (this.groups != null)) {
 1768  18
                 Set memoryGroup = (Set) groups.get(groupName);
 1769   
 
 1770  18
                 if (memoryGroup != null) {
 1771  18
                     memoryGroup.remove(key);
 1772   
 
 1773  18
                     if (memoryGroup.isEmpty()) {
 1774  0
                         groups.remove(groupName);
 1775   
                     }
 1776   
                 }
 1777   
             }
 1778   
 
 1779   
             // Update the persistent group maps
 1780  24
             if (persist) {
 1781  24
                 Set persistentGroup = persistRetrieveGroup(groupName);
 1782   
 
 1783  24
                 if (persistentGroup != null) {
 1784  18
                     persistentGroup.remove(key);
 1785   
 
 1786  18
                     if (persistentGroup.isEmpty()) {
 1787  0
                         persistRemoveGroup(groupName);
 1788   
                     } else {
 1789  18
                         persistStoreGroup(groupName, persistentGroup);
 1790   
                     }
 1791   
                 }
 1792   
             }
 1793   
         }
 1794   
     }
 1795   
 
 1796   
     /**
 1797   
      * Updates the groups to reflect the differences between the old and new
 1798   
      * cache entries. Either of the old or new values can be <code>null</code>
 1799   
      * or contain a <code>null</code> group list, in which case the entry's
 1800   
      * groups will all be added or removed respectively.
 1801   
      *
 1802   
      * @param oldValue The old CacheEntry that is being replaced.
 1803   
      * @param newValue The new CacheEntry that is being inserted.
 1804   
      */
 1805  106
     private void updateGroups(Object oldValue, Object newValue, boolean persist) {
 1806   
         // If we have/had a CacheEntry, update the group lookups
 1807  106
         boolean oldIsCE = oldValue instanceof CacheEntry;
 1808  106
         boolean newIsCE = newValue instanceof CacheEntry;
 1809   
 
 1810  106
         if (newIsCE && oldIsCE) {
 1811  106
             updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
 1812  0
         } else if (newIsCE) {
 1813  0
             updateGroups(null, (CacheEntry) newValue, persist);
 1814  0
         } else if (oldIsCE) {
 1815  0
             updateGroups((CacheEntry) oldValue, null, persist);
 1816   
         }
 1817   
     }
 1818   
 
 1819   
     /**
 1820   
      * Updates the groups to reflect the differences between the old and new cache entries.
 1821   
      * Either of the old or new values can be <code>null</code>
 1822   
      * or contain a <code>null</code> group list, in which case the entry's
 1823   
      * groups will all be added or removed respectively.
 1824   
      *
 1825   
      * @param oldValue The old CacheEntry that is being replaced.
 1826   
      * @param newValue The new CacheEntry that is being inserted.
 1827   
      */
 1828  362
     private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
 1829  362
         Set oldGroups = null;
 1830  362
         Set newGroups = null;
 1831   
 
 1832  362
         if (oldValue != null) {
 1833  106
             oldGroups = oldValue.getGroups();
 1834   
         }
 1835   
 
 1836  362
         if (newValue != null) {
 1837  362
             newGroups = newValue.getGroups();
 1838   
         }
 1839   
 
 1840   
         // Get the names of the groups to remove
 1841  362
         if (oldGroups != null) {
 1842  80
             Set removeFromGroups = new HashSet();
 1843   
 
 1844  80
             for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1845  134
                 String groupName = (String) it.next();
 1846   
 
 1847  134
                 if ((newGroups == null) || !newGroups.contains(groupName)) {
 1848   
                     // We need to remove this group
 1849  24
                     removeFromGroups.add(groupName);
 1850   
                 }
 1851   
             }
 1852   
 
 1853  80
             removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
 1854   
         }
 1855   
 
 1856   
         // Get the names of the groups to add
 1857  362
         if (newGroups != null) {
 1858  152
             Set addToGroups = new HashSet();
 1859   
 
 1860  152
             for (Iterator it = newGroups.iterator(); it.hasNext();) {
 1861  252
                 String groupName = (String) it.next();
 1862   
 
 1863  252
                 if ((oldGroups == null) || !oldGroups.contains(groupName)) {
 1864   
                     // We need to add this group
 1865  142
                     addToGroups.add(groupName);
 1866   
                 }
 1867   
             }
 1868   
 
 1869  152
             addGroupMappings(newValue.getKey(), addToGroups, persist);
 1870   
         }
 1871   
     }
 1872   
 
 1873   
    /**
     * AbstractConcurrentReadCache collision list entry.
     * Instances are immutable apart from the volatile <tt>value</tt> field;
     * a <code>null</code> value marks an entry as removed.
     */
    protected static class Entry implements Map.Entry {
        protected final Entry next;
        protected final Object key;

        /*
           The use of volatile for value field ensures that
           we can detect status changes without synchronization.
           The other fields are never changed, and are
           marked as final.
        */
        protected final int hash;
        protected volatile Object value;

        Entry(int hash, Object key, Object value, Entry next) {
            this.hash = hash;
            this.key = key;
            this.next = next;
            this.value = value;
        }

        // Map.Entry Ops
        public Object getKey() {
            return key;
        }

        /**
         * Set the value of this entry.
         * Note: In an entrySet or
         * entrySet.iterator), unless the set or iterator is used under
         * synchronization of the table as a whole (or you can otherwise
         * guarantee lack of concurrent modification), <tt>setValue</tt>
         * is not strictly guaranteed to actually replace the value field
         * obtained via the <tt>get</tt> operation of the underlying hash
         * table in multithreaded applications.  If iterator-wide
         * synchronization is not used, and any other concurrent
         * <tt>put</tt> or <tt>remove</tt> operations occur, sometimes
         * even to <em>other</em> entries, then this change is not
         * guaranteed to be reflected in the hash table. (It might, or it
         * might not. There are no assurances either way.)
         *
         * @param      value   the new value.
         * @return     the previous value, or null if entry has been detectably
         * removed.
         * @exception  NullPointerException  if the value is <code>null</code>.
         *
         **/
        public Object setValue(Object value) {
            if (value == null) {
                throw new NullPointerException();
            }

            Object oldValue = this.value;
            this.value = value;

            return oldValue;
        }

        /**
         * Get the value.
         * Note: In an entrySet or entrySet.iterator,
         * unless the set or iterator is used under synchronization of the
         * table as a whole (or you can otherwise guarantee lack of
         * concurrent modification), <tt>getValue</tt> <em>might</em>
         * return null, reflecting the fact that the entry has been
         * concurrently removed. However, there are no assurances that
         * concurrent removals will be reflected using this method.
         *
         * @return     the current value, or null if the entry has been
         * detectably removed.
         **/
        public Object getValue() {
            return value;
        }

        // Map.Entry equality: same key and same (possibly null) value.
        public boolean equals(Object o) {
            if (!(o instanceof Map.Entry)) {
                return false;
            }

            Map.Entry e = (Map.Entry) o;

            if (!key.equals(e.getKey())) {
                return false;
            }

            // Snapshot the volatile value once for a consistent comparison.
            Object v = value;

            return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
        }

        // Standard Map.Entry hash: key hash XOR value hash (0 when removed).
        public int hashCode() {
            Object v = value;

            return hash ^ ((v == null) ? 0 : v.hashCode());
        }

        public String toString() {
            return key + "=" + value;
        }

        // Deep-copies the entire collision chain via recursion.
        // NOTE(review): recursion depth equals chain length -- fine for hash
        // buckets, which stay short in practice.
        protected Object clone() {
            return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
        }
    }
 1980   
 
 1981   
    /**
     * Weakly-consistent iterator over a snapshot of the table. The table
     * array is captured at construction time; entries removed during
     * traversal (their value field nulled) are skipped. Buckets are scanned
     * from the highest index downwards. Also implements Enumeration for
     * legacy callers.
     */
    protected class HashIterator implements Iterator, Enumeration {
        protected final Entry[] tab; // snapshot of table
        protected Entry entry = null; // current node of slot
        protected Entry lastReturned = null; // last node returned by next
        protected Object currentKey; // key for current node
        protected Object currentValue; // value for current node
        protected int index; // current slot

        protected HashIterator() {
            tab = AbstractConcurrentReadCache.this.getTableForReading();
            index = tab.length - 1;
        }

        public boolean hasMoreElements() {
            return hasNext();
        }

        public boolean hasNext() {
            /*
              currentkey and currentValue are set here to ensure that next()
              returns normally if hasNext() returns true. This avoids
              surprises especially when final element is removed during
              traversal -- instead, we just ignore the removal during
              current traversal.
            */
            for (;;) {
                if (entry != null) {
                    Object v = entry.value;

                    if (v != null) {
                        currentKey = entry.key;
                        currentValue = v;

                        return true;
                    } else {
                        // Entry was concurrently removed (value nulled) --
                        // skip it and continue along this bucket's chain.
                        entry = entry.next;
                    }
                }

                // Advance to the next non-empty bucket, scanning backwards.
                while ((entry == null) && (index >= 0)) {
                    entry = tab[index--];
                }

                if (entry == null) {
                    // Exhausted every bucket -- clear state and finish.
                    currentKey = currentValue = null;

                    return false;
                }
            }
        }

        public Object next() {
            // currentKey is non-null only when a prior hasNext() latched an
            // element; otherwise probe now and fail if nothing remains.
            if ((currentKey == null) && !hasNext()) {
                throw new NoSuchElementException();
            }

            Object result = returnValueOfNext();
            lastReturned = entry;
            currentKey = currentValue = null;
            entry = entry.next;

            return result;
        }

        public Object nextElement() {
            return next();
        }

        public void remove() {
            if (lastReturned == null) {
                throw new IllegalStateException();
            }

            // Delegates to the cache's own remove so persistence, groups and
            // the eviction algorithm are all kept in sync.
            AbstractConcurrentReadCache.this.remove(lastReturned.key);
        }

        // Subclasses override to yield keys or values; the base iterator
        // yields the Entry node itself.
        protected Object returnValueOfNext() {
            return entry;
        }
    }
 2061   
 
 2062   
     protected class KeyIterator extends HashIterator {
 2063  72
         protected Object returnValueOfNext() {
 2064  72
             return currentKey;
 2065   
         }
 2066   
     }
 2067   
 
 2068   
     protected class ValueIterator extends HashIterator {
 2069  0
         protected Object returnValueOfNext() {
 2070  0
             return currentValue;
 2071   
         }
 2072   
     }
 2073   
 }
 2074