// Hashtable.cs — .NET Framework reference source (Whidbey / QFE branch).
// Origin path: ndp/clr/src/BCL/System/Collections/Hashtable.cs

                            // ==++== 
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--== 
/*============================================================
** 
** Class:  Hashtable 
**
** 
** Purpose: Hash table implementation
**
**
===========================================================*/ 
namespace System.Collections {
    using System; 
    using System.Runtime.Serialization; 
    using System.Security.Permissions;
    using System.Diagnostics; 
    using System.Threading;
    using System.Runtime.CompilerServices;
    using System.Runtime.ConstrainedExecution;
 
    // The Hashtable class represents a dictionary of associated keys and values
    // with constant lookup time. 
    // 
    // Objects used as keys in a hashtable must implement the GetHashCode
    // and Equals methods (or they can rely on the default implementations 
    // inherited from Object if key equality is simply reference
    // equality). Furthermore, the GetHashCode and Equals methods of
    // a key object must produce the same results given the same parameters for the
    // entire time the key is present in the hashtable. In practical terms, this 
    // means that key objects should be immutable, at least for the time they are
    // used as keys in a hashtable. 
    // 
    // When entries are added to a hashtable, they are placed into
    // buckets based on the hashcode of their keys. Subsequent lookups of 
    // keys will use the hashcode of the keys to only search a particular bucket,
    // thus substantially reducing the number of key comparisons required to find
    // an entry. A hashtable's maximum load factor, which can be specified
    // when the hashtable is instantiated, determines the maximum ratio of 
    // hashtable entries to hashtable buckets. Smaller load factors cause faster
    // average lookup times at the cost of increased memory consumption. The 
    // default maximum load factor of 1.0 generally provides the best balance 
    // between speed and size. As entries are added to a hashtable, the hashtable's
    // actual load factor increases, and when the actual load factor reaches the 
    // maximum load factor value, the number of buckets in the hashtable is
    // automatically increased by approximately a factor of two (to be precise, the
    // number of hashtable buckets is increased to the smallest prime number that
    // is larger than twice the current number of hashtable buckets). 
    //
    // Each object provides their own hash function, accessed by calling 
    // GetHashCode().  However, one can write their own object 
    // implementing IEqualityComparer and pass it to a constructor on
    // the Hashtable.  That hash function (and the equals method on the 
    // IEqualityComparer) would be used for all objects in the table.
    //
    // Changes since V1 during Whidbey:
    // *) Deprecated IHashCodeProvider, use IEqualityComparer instead.  This will 
    //    allow better performance for objects where equality checking can be
    //    done much faster than establishing an ordering between two objects, 
    //    such as an ordinal string equality 

    [DebuggerTypeProxy(typeof(System.Collections.Hashtable.HashtableDebugView))] 
    [DebuggerDisplay("Count = {Count}")]
    [System.Runtime.InteropServices.ComVisible(true)]
    [Serializable()]
    public class Hashtable : IDictionary, ISerializable, IDeserializationCallback, ICloneable { 
        /*
          Implementation Notes: 
          The generic Dictionary was copied from Hashtable's source - any 

 



 

 
 

 



 

 
 

 



 

 
 

 



 

 
 

 



 

 
 

 



*/ 

        private const String LoadFactorName = "LoadFactor"; 
        private const String VersionName = "Version"; 
        private const String ComparerName = "Comparer";
        private const String HashCodeProviderName = "HashCodeProvider"; 
        private const String HashSizeName = "HashSize";  // Must save buckets.Length
        private const String KeysName = "Keys";
        private const String ValuesName = "Values";
        private const String KeyComparerName = "KeyComparer"; 

        // Deleted entries have their key set to buckets 
 
        // The hash table data.
        // This cannot be serialised 
        // One slot of the open-addressed (double-hashed) table.
        private struct bucket {
            public Object key;      // null = never used; reference-equal to the buckets array = removed entry
            public Object val;      // the entry's value
            public int hash_coll;   // Store hash code (low 31 bits); sign bit means there was a collision.
        }
 
        private bucket[] buckets; 

        // The total number of entries in the hash table. 
        private  int count;

        // The total number of collision bits set in the hashtable
        private int occupancy; 

        private  int loadsize; 
        private  float loadFactor; 

        private volatile int version; 
        private volatile bool isWriterInProgress;

        private ICollection keys;
        private ICollection values; 

        private IEqualityComparer _keycomparer; 
        private Object _syncRoot; 

        // Legacy accessor for the IHashCodeProvider half of the key comparer.
        // Only meaningful when the table was built from the obsolete
        // hcp/comparer pair (stored as a CompatibleComparer); mixing it with a
        // modern IEqualityComparer is rejected.
        [Obsolete("Please use EqualityComparer property.")]
        protected IHashCodeProvider hcp
        {
            get
            {
                if (_keycomparer == null) {
                    // No comparer configured at all: nothing to expose.
                    return null;
                }
                CompatibleComparer compat = _keycomparer as CompatibleComparer;
                if (compat != null) {
                    return compat.HashCodeProvider;
                }
                // A real IEqualityComparer is in use; the legacy API cannot see it.
                throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
            }
            set
            {
                CompatibleComparer compat = _keycomparer as CompatibleComparer;
                if (compat != null) {
                    // Preserve the existing IComparer half, replace the provider.
                    _keycomparer = new CompatibleComparer(compat.Comparer, value);
                }
                else if (_keycomparer == null) {
                    _keycomparer = new CompatibleComparer((IComparer)null, value);
                }
                else {
                    throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
                }
            }
        }


        // Legacy accessor for the IComparer half of the key comparer.  Usable
        // only when the table stores the obsolete comparer/hcp pair (as a
        // CompatibleComparer) or no comparer at all; a modern
        // IEqualityComparer cannot be mixed with this API.
        [Obsolete("Please use KeyComparer properties.")]
        protected IComparer comparer
        {
            get
            {
                if (_keycomparer == null) {
                    return null;
                }
                CompatibleComparer compat = _keycomparer as CompatibleComparer;
                if (compat != null) {
                    return compat.Comparer;
                }
                throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
            }
            set
            {
                CompatibleComparer compat = _keycomparer as CompatibleComparer;
                if (compat != null) {
                    // Keep the existing hash code provider, swap the comparer.
                    _keycomparer = new CompatibleComparer(value, compat.HashCodeProvider);
                }
                else if (_keycomparer == null) {
                    _keycomparer = new CompatibleComparer(value, (IHashCodeProvider)null);
                }
                else {
                    throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
                }
            }
        }
 
        // The comparer used for key hashing/equality, or null when keys rely
        // on their own GetHashCode/Equals.  Assigned only by constructors and
        // the obsolete hcp/comparer setters.
        protected IEqualityComparer EqualityComparer
        {
            get
            {
                return _keycomparer;
            }
        }

 
        private SerializationInfo m_siInfo; //A temporary variable which we need during deserialization. 

        // Note: this constructor is a bogus constructor that performs no
        // initialization at all.  It exists only so SyncHashtable (which
        // delegates every call to a wrapped Hashtable) can be constructed.
        internal Hashtable( bool trash )
        {
        }

        // Constructs a new hashtable with an initial capacity of zero and a
        // load factor of 1.0.
        public Hashtable() : this(0, 1.0f) {
        }

        // Constructs a new hashtable with the given initial capacity and a load
        // factor of 1.0.  The capacity argument is a hint of the number of
        // entries the hashtable will contain; when this number (or an
        // approximation) is known, specifying it up front can eliminate a
        // number of resizing operations that would otherwise be performed as
        // elements are added to the hashtable.
        //
        public Hashtable(int capacity) : this(capacity, 1.0f) {
        }

        // Constructs a new hashtable with the given initial capacity and load
        // factor.  capacity is a hint of the number of entries the table will
        // contain; specifying it avoids intermediate resizes.  loadFactor is
        // the maximum ratio of entries to buckets, in the range [0.1, 1.0];
        // smaller values trade memory for faster average lookups.
        //
        // Throws ArgumentOutOfRangeException for a negative capacity or an
        // out-of-range (or NaN) load factor, and ArgumentException when the
        // requested capacity would require more than Int32.MaxValue buckets.
        //
        public Hashtable(int capacity, float loadFactor) {
            if (capacity < 0)
                throw new ArgumentOutOfRangeException("capacity", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
            // Written as !(in-range) rather than (out-of-range) so that a NaN
            // loadFactor is rejected as well: every comparison with NaN is false.
            if (!(loadFactor >= 0.1f && loadFactor <= 1.0f))
                throw new ArgumentOutOfRangeException("loadFactor", Environment.GetResourceString("ArgumentOutOfRange_HashtableLoadFactor", .1, 1.0));

            // Based on perf work, .72 is the optimal load factor for this table,
            // so the user-supplied factor is scaled down by 0.72 internally.
            this.loadFactor = 0.72f * loadFactor;

            double rawsize = capacity / this.loadFactor;
            if (rawsize > Int32.MaxValue)
                throw new ArgumentException(Environment.GetResourceString("Arg_HTCapacityOverflow"));

            // Avoid awfully small sizes: never start with fewer than 11 buckets.
            int hashsize = (rawsize > 11) ? HashHelpers.GetPrime((int)rawsize) : 11;
            buckets = new bucket[hashsize];

            loadsize = (int)(this.loadFactor * hashsize);
            isWriterInProgress = false;
            // Based on the current algorithm, loadsize must be less than hashsize.
            BCLDebug.Assert( loadsize < hashsize, "Invalid hashtable loadsize!");
        }
 
        // Constructs a new hashtable with the given initial capacity and load
        // factor. The capacity argument serves as an indication of the
        // number of entries the hashtable will contain. When this number (or an
        // approximation) is known, specifying it in the constructor can eliminate 
        // a number of resizing operations that would otherwise be performed when
        // elements are added to the hashtable. The loadFactor argument 
        // indicates the maximum ratio of hashtable entries to hashtable buckets. 
        // Smaller load factors cause faster average lookup times at the cost of
        // increased memory consumption. A load factor of 1.0 generally provides 
        // the best balance between speed and size.  The hcp argument
        // is used to specify an Object that will provide hash codes for all
        // the Objects in the table.  Using this, you can in effect override
        // GetHashCode() on each Object using your own hash function.  The 
        // comparer argument will let you specify a custom function for
        // comparing keys.  By specifying user-defined objects for hcp 
        // and comparer, users could make a hash table using strings 
        // as keys do case-insensitive lookups.
        // 
        [Obsolete("Please use Hashtable(int, float, IEqualityComparer) instead.")]
        public Hashtable(int capacity, float loadFactor, IHashCodeProvider hcp, IComparer comparer) : this(capacity, loadFactor) {
            // Wrap the legacy hcp/comparer pair in a CompatibleComparer; a
            // fully-null pair means "use the keys' own GetHashCode/Equals".
            if (hcp == null && comparer == null) {
                this._keycomparer = null;
            }
            else {
                this._keycomparer = new CompatibleComparer(comparer,hcp);
            }
        }

        // Constructs a new hashtable with the given initial capacity, load
        // factor, and key comparer (null means keys use their own
        // GetHashCode/Equals).
        public Hashtable(int capacity, float loadFactor, IEqualityComparer equalityComparer) : this(capacity, loadFactor) {
            this._keycomparer = equalityComparer;
        }

        // Constructs a new hashtable using a custom hash function
        // and a custom comparison function for keys.  This enables scenarios
        // such as doing lookups with case-insensitive strings.  Capacity 0,
        // load factor 1.0.
        //
        [Obsolete("Please use Hashtable(IEqualityComparer) instead.")]
        public Hashtable(IHashCodeProvider hcp, IComparer comparer) : this(0, 1.0f, hcp, comparer) {
        }
 
        // Constructs a new hashtable with a custom key comparer, zero initial
        // capacity, and a load factor of 1.0.
        public Hashtable(IEqualityComparer equalityComparer) : this(0, 1.0f, equalityComparer) {
        }
 
        // Constructs a new hashtable with the given initial capacity using a
        // custom hash function and a custom comparison function for keys.
        // This enables scenarios such as case-insensitive string lookups.
        //
        [Obsolete("Please use Hashtable(int, IEqualityComparer) instead.")]
        public Hashtable(int capacity, IHashCodeProvider hcp, IComparer comparer)
            : this(capacity, 1.0f, hcp, comparer) {
        }
 
        // Constructs a new hashtable with the given initial capacity and key
        // comparer, and a load factor of 1.0.
        public Hashtable(int capacity, IEqualityComparer equalityComparer)
            : this(capacity, 1.0f, equalityComparer) {
        }

        // Constructs a new hashtable containing a copy of the entries in the
        // given dictionary.  The hashtable is created with a load factor of 1.0.
        //
        public Hashtable(IDictionary d) : this(d, 1.0f) {
        }

        // Constructs a new hashtable containing a copy of the entries in the
        // given dictionary.  The hashtable is created with the given load factor
        // and no custom key comparer.
        //
        public Hashtable(IDictionary d, float loadFactor)
            : this(d, loadFactor, (IEqualityComparer)null) {
        }
 
        // Copy-construction variant taking the legacy hcp/comparer pair.
        [Obsolete("Please use Hashtable(IDictionary, IEqualityComparer) instead.")]
        public Hashtable(IDictionary d, IHashCodeProvider hcp, IComparer comparer)
            : this(d, 1.0f, hcp, comparer)  {
        }

        // Copy-construction variant with a custom key comparer and a load
        // factor of 1.0.
        public Hashtable(IDictionary d, IEqualityComparer equalityComparer)
            : this(d, 1.0f, equalityComparer)    {
        }
 
        [Obsolete("Please use Hashtable(IDictionary, float, IEqualityComparer) instead.")]
        public Hashtable(IDictionary d, float loadFactor, IHashCodeProvider hcp, IComparer comparer)
            : this((d != null ? d.Count : 0), loadFactor, hcp, comparer) {
            // d.Count feeds the capacity hint before the null check can run,
            // because the chained constructor call must execute first in C#.
            if (d==null)
                throw new ArgumentNullException("d", Environment.GetResourceString("ArgumentNull_Dictionary"));

            IDictionaryEnumerator e = d.GetEnumerator();
            while (e.MoveNext()) Add(e.Key, e.Value);
        }
 
        // Constructs a new hashtable containing a copy of the entries in the
        // given dictionary, with the given load factor and key comparer.
        // Throws ArgumentNullException when d is null.
        public Hashtable(IDictionary d, float loadFactor, IEqualityComparer equalityComparer)
            : this((d != null ? d.Count : 0), loadFactor, equalityComparer) {
            if (d==null)
                throw new ArgumentNullException("d", Environment.GetResourceString("ArgumentNull_Dictionary"));

            IDictionaryEnumerator e = d.GetEnumerator();
            while (e.MoveNext()) Add(e.Key, e.Value);
        }
 
        // Deserialization constructor.  We can't do anything with the keys and
        // values until the entire object graph has been deserialized and we
        // have a reasonable estimate that GetHashCode is not going to fail.
        // For the time being we just cache the SerializationInfo; the table is
        // not valid until OnDeserialization has been called.
        protected Hashtable(SerializationInfo info, StreamingContext context) {
            m_siInfo = info;
        }

        // Computes the hash function:  H(key, i) = h1(key) + i*h2(key, hashSize).
        // The out parameter seed is h1(key), while the out parameter
        // incr is h2(key, hashSize).  Callers of this function should
        // add incr each time through a probe loop.  Returns the 31-bit
        // hash value that is compared against each bucket's hash_coll.
        private uint InitHash(Object key, int hashsize, out uint seed, out uint incr) {
            // Hashcode must be positive.  Also, we must not use the sign bit, since
            // that is used for the collision bit.
            uint hashcode = (uint) GetHash(key) & 0x7FFFFFFF;
            seed = (uint) hashcode;
            // Restriction: incr MUST be between 1 and hashsize - 1, inclusive, for
            // the modular arithmetic to work correctly.  This guarantees you'll
            // visit every bucket in the table exactly once within hashsize
            // iterations.  Violate this and it'll cause obscure bugs forever.
            // If you change this calculation for h2(key), update putEntry too!
            incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)hashsize - 1)));
            return hashcode;
        }

        // Adds an entry with the given key and value to this hashtable. An
        // ArgumentException is thrown if the key is null or if the key is already
        // present in the hashtable.
        //
        public virtual void Add(Object key, Object value) {
            // add == true: throw on a duplicate key instead of overwriting.
            Insert(key, value, true);
        }
 
        // Removes all entries from this hashtable.  No-op on an empty table.
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
        public virtual void Clear() {
            if (count == 0)
                return;

            // Raise the write-in-progress flag so lock-free readers retry;
            // the critical region keeps a thread abort from stranding the flag.
            Thread.BeginCriticalRegion();
            isWriterInProgress = true;
            // Reset every slot: clears keys/values and all collision bits.
            for (int i = 0; i < buckets.Length; i++){
                buckets[i].hash_coll = 0;
                buckets[i].key = null;
                buckets[i].val = null;
            }

            count = 0;
            occupancy = 0;
            UpdateVersion();
            isWriterInProgress = false;
            Thread.EndCriticalRegion();
        }
 
        // Clone returns a virtually identical copy of this hash table.  This is
        // a shallow copy - the Objects in the table aren't cloned, only the
        // references to those Objects.
        public virtual Object Clone()
        {
            bucket[] lbuckets = buckets;
            Hashtable ht = new Hashtable(count, _keycomparer);
            ht.version = version;
            ht.loadFactor = loadFactor;
            ht.count = 0;

            // Re-insert every live entry from the snapshot; removed entries
            // carry the snapshotted array itself as their sentinel key.
            for (int bucket = lbuckets.Length - 1; bucket >= 0; bucket--) {
                Object keyv = lbuckets[bucket].key;
                if (keyv != null && keyv != lbuckets) {
                    ht[keyv] = lbuckets[bucket].val;
                }
            }

            return ht;
        }

        // Checks if this hashtable contains the given key.  Synonym for
        // ContainsKey; throws ArgumentNullException when key is null.
        public virtual bool Contains(Object key) {
            return ContainsKey(key);
        }

        // Checks if this hashtable contains an entry with the given key.  This is
        // an O(1) operation.  Throws ArgumentNullException when key is null.
        //
        public virtual bool ContainsKey(Object key) {
            if (key == null) {
                throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
            }

            uint seed;
            uint incr;
            // Take a snapshot of buckets, in case another thread resizes table
            bucket[] lbuckets = buckets;
            uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
            int  ntry = 0;

            bucket b;
            int bucketNumber = (int) (seed % (uint)lbuckets.Length);
            do {
                b = lbuckets[bucketNumber];
                if (b.key == null) {
                    // A never-used slot terminates the probe sequence: key absent.
                    return false;
                }
                if (((b.hash_coll &  0x7FFFFFFF) == hashcode) &&
                    KeyEquals (b.key, key))
                    return true;
                // Double-hash probing: step by incr, wrapping around the table.
                bucketNumber = (int) (((long)bucketNumber + incr)% (uint)lbuckets.Length);
            } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
            // Probe chain ended (collision bit clear) without finding the key.
            return false;
        }


 
        // Checks if this hashtable contains an entry with the given value.
        // Values are compared with Object.Equals.  This performs a linear
        // search and is thus substantially slower than ContainsKey.
        //
        public virtual bool ContainsValue(Object value) {
            if (value == null) {
                // A null value matches only live entries (key neither null nor
                // the removed-entry sentinel, i.e. the buckets array) whose
                // stored value is null.
                for (int i = 0; i < buckets.Length; i++) {
                    bucket b = buckets[i];
                    if (b.key != null && b.key != buckets && b.val == null) {
                        return true;
                    }
                }
                return false;
            }

            // Non-null value: compare with Equals against every stored value.
            for (int i = 0; i < buckets.Length; i++) {
                Object val = buckets[i].val;
                if (val != null && val.Equals(value)) {
                    return true;
                }
            }
            return false;
        }

        // Copies the keys of this hashtable into the given array starting at
        // the given index.  Used by the KeyCollection's CopyTo implementation.
        private void CopyKeys(Array array, int arrayIndex) {
            bucket[] lbuckets = buckets;
            // Walk from the last bucket down so the output order matches the
            // other Copy* helpers.
            for (int i = lbuckets.Length - 1; i >= 0; i--) {
                Object keyv = lbuckets[i].key;
                // Live entries only: skip empty slots and removed-entry sentinels.
                if (keyv != null && keyv != buckets) {
                    array.SetValue(keyv, arrayIndex++);
                }
            }
        }

        // Copies the entries of this hashtable, wrapped as DictionaryEntry
        // structs, into the given array starting at the given index.  Used by
        // CopyTo.
        private void CopyEntries(Array array, int arrayIndex) {
            bucket[] lbuckets = buckets;
            // Walk from the last bucket down so the output order matches the
            // other Copy* helpers.
            for (int i = lbuckets.Length - 1; i >= 0; i--) {
                Object keyv = lbuckets[i].key;
                // Live entries only: skip empty slots and removed-entry sentinels.
                if (keyv != null && keyv != buckets) {
                    DictionaryEntry entry = new DictionaryEntry(keyv, lbuckets[i].val);
                    array.SetValue(entry, arrayIndex++);
                }
            }
        }

        // Copies the entries of this hash table, as DictionaryEntry values,
        // into the given one-dimensional array starting at arrayIndex.
        //
        // Throws: ArgumentNullException (array is null), ArgumentException
        // (multi-dimensional array, or not enough room after arrayIndex), and
        // ArgumentOutOfRangeException (negative arrayIndex).
        public virtual void CopyTo(Array array, int arrayIndex)
        {
            if (array == null)
                throw new ArgumentNullException("array", Environment.GetResourceString("ArgumentNull_Array"));
            if (array.Rank != 1)
                throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
            if (arrayIndex < 0)
                throw new ArgumentOutOfRangeException("arrayIndex", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
            if (array.Length - arrayIndex < count)
                throw new ArgumentException(Environment.GetResourceString("Arg_ArrayPlusOffTooSmall"));
            CopyEntries(array, arrayIndex);
        }

        // Copies the entries of this Hashtable into a new KeyValuePairs array.
        // KeyValuePairs differs from DictionaryEntry in carrying special
        // debugger attributes on its fields.
        internal virtual KeyValuePairs[] ToKeyValuePairsArray() {
            KeyValuePairs[] array = new KeyValuePairs[count];
            int index = 0;
            bucket[] lbuckets = buckets;
            // Last bucket down, matching the order of the Copy* helpers.
            for (int i = lbuckets.Length - 1; i >= 0; i--) {
                Object keyv = lbuckets[i].key;
                if (keyv != null && keyv != buckets) {
                    array[index++] = new KeyValuePairs(keyv, lbuckets[i].val);
                }
            }

            return array;
        }

 
        // Copies the values of this hashtable to a given array starting at a given 
        // index. This method is used by the implementation of the CopyTo method in
        // the ValueCollection class. 
        private void CopyValues(Array array, int arrayIndex) {
            bucket[] lbuckets = buckets;
            for (int i = lbuckets.Length; --i >= 0;) {
                Object keyv = lbuckets[i].key; 
                if ((keyv != null) && (keyv != buckets)){
                    array.SetValue(lbuckets[i].val, arrayIndex++); 
                } 
            }
        } 

        // Returns the value associated with the given key, or null when the
        // key is not present.  The setter adds or overwrites (Insert with
        // add == false), unlike Add, which throws on a duplicate key.
        //
        public virtual Object this[Object key] {
            get {
                if (key == null) {
                    throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
                }
                uint seed;
                uint incr;


                // Take a snapshot of buckets, in case another thread does a resize
                bucket[] lbuckets = buckets;
                uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
                int  ntry = 0;

                bucket b;
                int bucketNumber = (int) (seed % (uint)lbuckets.Length);
                do
                {
                    int currentversion;

                    //     A read operation on hashtable has three steps:
                    //        (1) calculate the hash and find the slot number.
                    //        (2) compare the hashcode, if equal, go to step 3. Otherwise end.
                    //        (3) compare the key, if equal, go to step 4. Otherwise end.
                    //        (4) return the value contained in the bucket.
                    //     After step 3 and before step 4, a writer can kick in, remove the old
                    //     item, and add a new one in the same bucket.  So the reader re-reads
                    //     the bucket until it observes a stable snapshot: no writer in
                    //     progress and an unchanged version number across the bucket copy.
                    int spinCount = 0;
                    do {
                        // This is a volatile read; the following memory accesses cannot be moved ahead of it.
                        currentversion = version;
                        b = lbuckets[bucketNumber];

                        // The contention between reader and writer shouldn't happen frequently.
                        // But just in case this will burn CPU, yield the control of CPU if we spinned a few times.
                        // 8 is just a random number I pick.
                        if( (++spinCount) % 8 == 0 ) {
                            Thread.Sleep(1);   // 1 means we are yielding control to all threads, including low-priority ones.
                        }
                    } while ( isWriterInProgress || (currentversion != version) );

                    if (b.key == null) {
                        // A never-used slot terminates the probe sequence: key absent.
                        return null;
                    }
                    if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
                        KeyEquals (b.key, key))
                        return b.val;
                    // Double-hash probing: advance by incr, wrapping around the table.
                    bucketNumber = (int) (((long)bucketNumber + incr)% (uint)lbuckets.Length);
                } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
                // Probe chain ended (collision bit clear) without finding the key.
                return null;
            }

            set {
                Insert(key, value, false);
            }
        }
 
        // Increases the bucket count of this hashtable.  Called from Insert
        // when the actual load reaches the limit derived from the load factor.
        // The number of buckets is increased to the smallest prime number
        // larger than twice the current number of buckets, and the entries are
        // redistributed into the new buckets using their cached hashcodes.
        private void expand()  {

            int rawsize = HashHelpers.GetPrime(buckets.Length*2);    // buckets.Length*2 will not overflow
            rehash(rawsize);
        }

        // We occasionally need to rehash the table at its current size to
        // clean up the accumulated collision bits.
        private void rehash() {
            rehash( buckets.Length );
        }
 
        private void UpdateVersion() {
            // Version may wrap to a negative value at Int32.MaxValue; readers
            // only compare versions for equality, so the wrap is harmless and
            // needs no special casing.
            version++;
        }
 
        // Rebuilds the table into newsize buckets, clearing all collision bits.
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
        private void rehash( int newsize ) {

            // reset occupancy (the count of collision bits; all are cleared below)
            occupancy=0;

            // Don't replace any internal state until we've finished adding to the
            // new bucket[].  This serves two purposes:
            //   1) Allow concurrent readers to see valid hashtable contents
            //      at all times
            //   2) Protect against an OutOfMemoryException while allocating this
            //      new bucket[].
            bucket[] newBuckets = new bucket[newsize];

            // rehash table into new buckets
            int nb;
            for (nb = 0; nb < buckets.Length; nb++){
                bucket oldb = buckets[nb];
                // Copy live entries only: skip empty slots (null key) and
                // removed entries (key == the buckets array sentinel).
                if ((oldb.key != null) && (oldb.key != buckets)){
                    putEntry(newBuckets, oldb.key, oldb.val, oldb.hash_coll & 0x7FFFFFFF);
                }
            }

            // New bucket[] is good to go - replace buckets and other internal state.
            // The write-in-progress flag makes lock-free readers retry; the
            // critical region keeps a thread abort from stranding the flag.
            Thread.BeginCriticalRegion();
            isWriterInProgress = true;
            buckets = newBuckets;
            loadsize = (int)(loadFactor * newsize);
            UpdateVersion();
            isWriterInProgress = false;
            Thread.EndCriticalRegion();
            // minimum size of hashtable is 11 now and maximum loadFactor is 0.72 now.
            BCLDebug.Assert(loadsize < newsize, "Our current implementaion means this is not possible.");
            return;
        }
 
        // Returns an enumerator for this hashtable.
        // If modifications are made to the hashtable while an enumeration is
        // in progress, the MoveNext and Current methods of the
        // enumerator will throw an exception.
        //
        IEnumerator IEnumerable.GetEnumerator() {
            return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
        }
 
        // Returns a dictionary enumerator for this hashtable.
        // If modifications are made to the hashtable while an enumeration is
        // in progress, the MoveNext and Current methods of the
        // enumerator will throw an exception.
        //
        public virtual IDictionaryEnumerator GetEnumerator() {
            return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
        }
 
        // Internal method to get the hash code for an Object.  Uses the
        // user-supplied comparer's GetHashCode when one was provided;
        // otherwise calls GetHashCode() on the key itself.
        protected virtual int GetHash(Object key)
        {
            IEqualityComparer keyComparer = _keycomparer;
            return (keyComparer != null) ? keyComparer.GetHashCode(key) : key.GetHashCode();
        }

        // Is this Hashtable read-only?  Always false for this implementation.
        public virtual bool IsReadOnly {
            get { return false; }
        }

        // Is this Hashtable fixed-size?  Always false: entries can be added and removed.
        public virtual bool IsFixedSize {
            get { return false; }
        }

        // Is this Hashtable synchronized?  Always false here; see the SyncRoot property.
        public virtual bool IsSynchronized {
            get { return false; }
        }

        // Internal method to compare two keys.  When an IEqualityComparer was
        // supplied at construction time this delegates to comparer.Equals;
        // otherwise it uses item.Equals(key).
        //
        protected virtual bool KeyEquals(Object item, Object key)
        {
            BCLDebug.Assert(key != null, "key can't be null here!");
            // The buckets array itself is stored in a slot's key field as a
            // sentinel for "emptied" slots (see Remove/putEntry); such a
            // sentinel can never match a real key.
            if (Object.ReferenceEquals(buckets, item)) {
                return false;
            }

            if (_keycomparer == null) {
                return (item != null) && item.Equals(key);
            }
            return _keycomparer.Equals(item, key);
        }

        // Returns a collection representing the keys of this hashtable. The order 
        // in which the returned collection represents the keys is unspecified, but
        // it is guaranteed to be the same order in which a collection returned by
        // GetValues represents the values of the hashtable.
        // 
        // The returned collection is live in the sense that any changes
        // to the hash table are reflected in this collection.  It is not 
        // a static copy of all the keys in the hash table. 
        //
        public virtual ICollection Keys {
            get {
                // Lazily create the live key collection on first request and
                // reuse it afterwards.
                if (keys == null) {
                    keys = new KeyCollection(this);
                }
                return keys;
            }
        }
 
        // Returns a collection representing the values of this hashtable. The 
        // order in which the returned collection represents the values is
        // unspecified, but it is guaranteed to be the same order in which a 
        // collection returned by GetKeys represents the keys of the
        // hashtable.
        //
        // The returned collection is live in the sense that any changes 
        // to the hash table are reflected in this collection.  It is not
        // a static copy of all the keys in the hash table. 
        // 
        public virtual ICollection Values {
            get {
                // Lazily create the live value collection on first request and
                // reuse it afterwards.
                if (values == null) {
                    values = new ValueCollection(this);
                }
                return values;
            }
        }

        // Inserts an entry into this hashtable. This method is called from the Set
        // and Add methods. If the add parameter is true and the given key already
        // exists in the hashtable, an ArgumentException is thrown.
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
        private void Insert (Object key, Object nvalue, bool add) {
            if (key == null) {
                throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
            }
            // Grow when the entry count reaches the load limit; rehash in place
            // when collision-bit pollution (occupancy) is high on a non-trivial table.
            if (count >= loadsize) {
                expand();
            }
            else if(occupancy > loadsize && count > 100) {
                rehash();
            }

            uint seed;
            uint incr;
            // Assume we only have one thread writing concurrently.  Modify
            // buckets to contain new data, as long as we insert in the right order.
            uint hashcode = InitHash(key, buckets.Length, out seed, out incr);
            int  ntry = 0;
            int emptySlotNumber = -1; // We use the empty slot number to cache the first empty slot. We chose to reuse slots
            // created by remove that have the collision bit set over using up new slots.
            int bucketNumber = (int) (seed % (uint)buckets.Length);
            do {

                // Set emptySlot number to current bucket if it is the first available bucket that we have seen
                // that once contained an entry (key is the buckets-array sentinel) and also has had a collision.
                // We need to search this entire collision chain because we have to ensure that there are no
                // duplicate entries in the table.
                if (emptySlotNumber == -1 && (buckets[bucketNumber].key == buckets) && (buckets[bucketNumber].hash_coll < 0))//(((buckets[bucketNumber].hash_coll & unchecked(0x80000000))!=0)))
                    emptySlotNumber = bucketNumber;

                // Insert the key/value pair into this bucket if this bucket is empty and has never contained an entry
                // OR
                // This bucket once contained an entry but there has never been a collision
                if ((buckets[bucketNumber].key == null) ||
                    (buckets[bucketNumber].key == buckets && ((buckets[bucketNumber].hash_coll & unchecked(0x80000000))==0))) {

                    // If we have found an available bucket that has never had a collision, but we've seen an available
                    // bucket in the past that has the collision bit set, use the previous bucket instead
                    if (emptySlotNumber != -1) // Reuse slot
                        bucketNumber = emptySlotNumber;

                    // We pretty much have to insert in this order.  Don't set hash
                    // code until the value & key are set appropriately: readers match
                    // on hash_coll, so publishing it last keeps a concurrent reader
                    // from observing a half-written entry.
                    Thread.BeginCriticalRegion();
                    isWriterInProgress = true;
                    buckets[bucketNumber].val = nvalue;
                    buckets[bucketNumber].key  = key;
                    buckets[bucketNumber].hash_coll |= (int) hashcode;
                    count++;
                    UpdateVersion();
                    isWriterInProgress = false;
                    Thread.EndCriticalRegion();
                    return;
                }

                // The current bucket is in use
                // OR
                // it is available, but has had the collision bit set and we have already found an available bucket
                if (((buckets[bucketNumber].hash_coll & 0x7FFFFFFF) == hashcode) &&
                    KeyEquals (buckets[bucketNumber].key, key)) {
                    if (add) {
                        throw new ArgumentException(Environment.GetResourceString("Argument_AddingDuplicate__", buckets[bucketNumber].key, key));
                    }
                    // Key already present and add==false: overwrite the value only.
                    Thread.BeginCriticalRegion();
                    isWriterInProgress = true;
                    buckets[bucketNumber].val = nvalue;
                    UpdateVersion();
                    isWriterInProgress = false;
                    Thread.EndCriticalRegion();
                    return;
                }

                // The current bucket is full, and we have therefore collided.  We need to set the collision bit
                // UNLESS
                // we have remembered an available slot previously.
                if (emptySlotNumber == -1) {// We don't need to set the collision bit here since we already have an empty slot
                    if( buckets[bucketNumber].hash_coll >= 0 ) {
                        buckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
                        occupancy++;
                    }
                }
                // Double-hash probe: step by incr, wrapping around the table.
                bucketNumber = (int) (((long)bucketNumber + incr)% (uint)buckets.Length);
            } while (++ntry < buckets.Length);

            // This code is here if and only if there were no buckets without a collision bit set in the entire table
            if (emptySlotNumber != -1)
            {
                // We pretty much have to insert in this order.  Don't set hash
                // code until the value & key are set appropriately.
                Thread.BeginCriticalRegion();
                isWriterInProgress = true;
                buckets[emptySlotNumber].val = nvalue;
                buckets[emptySlotNumber].key  = key;
                buckets[emptySlotNumber].hash_coll |= (int) hashcode;
                count++;
                UpdateVersion();
                isWriterInProgress = false;
                Thread.EndCriticalRegion();
                return;
            }

            // If you see this assert, make sure load factor & count are reasonable.
            // Then verify that our double hash function (h2, described at top of file)
            // meets the requirements described above. You should never see this assert.
            BCLDebug.Assert(false, "hash table insert failed!  Load factor too high, or our double hashing function is incorrect.");
            throw new InvalidOperationException(Environment.GetResourceString("InvalidOperation_HashInsertFailed"));
        }

        // Copies one key/value pair into newBuckets during a resize or rehash.
        // The caller guarantees the incoming hashcode has no collision (sign)
        // bit set and that the key is not already present in newBuckets.
        private void putEntry (bucket[] newBuckets, Object key, Object nvalue, int hashcode)
        {
            BCLDebug.Assert(hashcode >= 0, "hashcode >= 0");  // make sure collision bit (sign bit) wasn't set.

            // Double hashing: seed picks the starting slot, incr the probe step.
            uint seed = (uint) hashcode;
            uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)newBuckets.Length - 1)));
            int slot = (int) (seed % (uint)newBuckets.Length);
            while (true) {
                // A slot is free when its key is null (never used) or is the
                // buckets-array sentinel (previously used, then emptied).
                bool isFree = (newBuckets[slot].key == null) || (newBuckets[slot].key == buckets);
                if (isFree) {
                    newBuckets[slot].val = nvalue;
                    newBuckets[slot].key = key;
                    newBuckets[slot].hash_coll |= hashcode;
                    return;
                }

                // Occupied: record the collision (once per slot), then keep probing.
                if (newBuckets[slot].hash_coll >= 0) {
                    newBuckets[slot].hash_coll |= unchecked((int)0x80000000);
                    occupancy++;
                }
                slot = (int) (((long)slot + incr) % (uint)newBuckets.Length);
            }
        }

        // Removes an entry from this hashtable. If an entry with the specified
        // key exists in the hashtable, it is removed. An ArgumentException is
        // thrown if the key is null.
        //
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
        public virtual void Remove(Object key) {
            if (key == null) {
                throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
            }
            uint seed;
            uint incr;
            // Assuming only one concurrent writer, write directly into buckets.
            uint hashcode = InitHash(key, buckets.Length, out seed, out incr);
            int ntry = 0;

            bucket b;
            int bn = (int) (seed % (uint)buckets.Length);  // bucketNumber
            do {
                b = buckets[bn];
                if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
                    KeyEquals (b.key, key)) {
                    Thread.BeginCriticalRegion();
                    isWriterInProgress = true;
                    // Clear hash_coll field, then key, then value.  Only the
                    // collision (sign) bit is preserved in hash_coll.
                    buckets[bn].hash_coll &= unchecked((int)0x80000000);
                    if (buckets[bn].hash_coll != 0) {
                        // Slot lies on another entry's probe chain: mark it with
                        // the buckets-array sentinel so later probes continue past it.
                        buckets[bn].key = buckets;
                    }
                    else {
                        buckets[bn].key = null;
                    }
                    buckets[bn].val = null;  // Free object references sooner & simplify ContainsValue.
                    count--;
                    UpdateVersion();
                    isWriterInProgress = false;
                    Thread.EndCriticalRegion();
                    return;
                }
                // Continue probing only while the chain is unbroken (collision
                // bit set) and we have not scanned the whole table.
                bn = (int) (((long)bn + incr)% (uint)buckets.Length);
            } while (b.hash_coll < 0 && ++ntry < buckets.Length);

            // Key not present: Remove is deliberately a silent no-op here
            // (an earlier "Arg_RemoveArgNotFound" throw was intentionally disabled).
        }
 
        // Returns the object to synchronize on for this hash table.
        public virtual Object SyncRoot {
            get {
                // Lazily publish a dedicated lock object.  CompareExchange
                // guarantees exactly one instance wins even under a race.
                if (_syncRoot == null) {
                    System.Threading.Interlocked.CompareExchange(ref _syncRoot, new Object(), null);
                }
                return _syncRoot;
            }
        }

        // Returns the number of key/value associations currently stored in
        // this hashtable.
        //
        public virtual int Count {
            get {
                return count;
            }
        }

        // Returns a thread-safe wrapper around the given Hashtable.
        //
        [HostProtection(Synchronization=true)]
        public static Hashtable Synchronized(Hashtable table) {
            if (table == null) {
                throw new ArgumentNullException("table");
            }
            return new SyncHashtable(table);
        }

        //
        // The ISerializable Implementation 
        //
 
        // Serializes this hashtable: load factor, version, comparer (in a
        // format readable by older runtimes when possible), bucket count,
        // and parallel key/value arrays.
        public virtual void GetObjectData(SerializationInfo info, StreamingContext context) {
            if (info==null) {
                throw new ArgumentNullException("info");
            }
            info.AddValue(LoadFactorName, loadFactor);
            info.AddValue(VersionName, version);

            //
            // We need to maintain serialization compatibility with Everett and RTM.
            // If the comparer is null or a compatible comparer, serialize Hashtable
            // in a format that can be deserialized on Everett and RTM.
            //
#pragma warning disable 618
            if( _keycomparer == null) {
                info.AddValue(ComparerName, null,typeof(IComparer));
                info.AddValue(HashCodeProviderName, null, typeof(IHashCodeProvider));
            }
            else if(_keycomparer is CompatibleComparer) {
                // CompatibleComparer wraps a legacy IComparer/IHashCodeProvider
                // pair; serialize its two halves under the legacy names.
                CompatibleComparer c = _keycomparer as CompatibleComparer;
                info.AddValue(ComparerName, c.Comparer, typeof(IComparer));
                info.AddValue(HashCodeProviderName, c.HashCodeProvider, typeof(IHashCodeProvider));
            }
            else{
                // Whidbey-style comparer: serialize the IEqualityComparer directly.
                info.AddValue(KeyComparerName, _keycomparer, typeof(IEqualityComparer));
            }
#pragma warning restore 618

            info.AddValue(HashSizeName, buckets.Length); //This is the length of the bucket array.
            Object [] serKeys = new Object[count];
            Object [] serValues = new Object[count];
            CopyKeys(serKeys, 0);
            CopyValues(serValues,0);
            info.AddValue(KeysName, serKeys, typeof(Object[]));
            info.AddValue(ValuesName, serValues, typeof(Object[]));
        }
 
        // 
        // DeserializationEvent Listener
        // 
        public virtual void OnDeserialization(Object sender) {
            if (buckets!=null) {
                return; //Somebody had a dependency on this hashtable and fixed us up before the ObjectManager got to it.
            } 
            if (m_siInfo==null) {
                throw new SerializationException(Environment.GetResourceString("Serialization_InvalidOnDeser")); 
            } 

            int hashsize = 0; 
            IComparer c = null;

#pragma warning disable 618
            IHashCodeProvider hcp = null; 
#pragma warning restore 618
 
            Object [] serKeys = null; 
            Object [] serValues = null;
 
            SerializationInfoEnumerator enumerator = m_siInfo.GetEnumerator();

            while( enumerator.MoveNext())
            { 
                switch( enumerator.Name)
                { 
                    case LoadFactorName: 
                        loadFactor = m_siInfo.GetSingle(LoadFactorName);
                        break; 
                    case HashSizeName:
                        hashsize = m_siInfo.GetInt32(HashSizeName);
                        break;
                    case KeyComparerName: 
                        _keycomparer = (IEqualityComparer)m_siInfo.GetValue(KeyComparerName, typeof(IEqualityComparer));
                        break; 
                    case ComparerName: 
                        c = (IComparer)m_siInfo.GetValue(ComparerName, typeof(IComparer));
                        break; 
                    case HashCodeProviderName:
#pragma warning disable 618
                        hcp = (IHashCodeProvider)m_siInfo.GetValue(HashCodeProviderName, typeof(IHashCodeProvider));
#pragma warning restore 618 
                        break;
                    case KeysName: 
                        serKeys = (Object[])m_siInfo.GetValue(KeysName, typeof(Object[])); 
                        break;
                    case ValuesName: 
                        serValues = (Object[])m_siInfo.GetValue(ValuesName, typeof(Object[]));
                        break;
                }
            } 

            loadsize   = (int)(loadFactor*hashsize); 
 
            // V1 object doesn't has _keycomparer field.
            if ( (_keycomparer == null) && ( (c != null) || (hcp != null) ) ){ 
                _keycomparer = new CompatibleComparer(c,hcp);
            }

            buckets = new bucket[hashsize]; 

            if (serKeys==null) { 
                throw new SerializationException(Environment.GetResourceString("Serialization_MissingKeys")); 
            }
            if (serValues==null) { 
                throw new SerializationException(Environment.GetResourceString("Serialization_MissingValues"));
            }
            if (serKeys.Length!=serValues.Length) {
                throw new SerializationException(Environment.GetResourceString("Serialization_KeyValueDifferentSizes")); 
            }
            for (int i=0; i 0) { 
                    bucket--;
                    Object keyv = hashtable.buckets[bucket].key; 
                    if ((keyv!= null) && (keyv != hashtable.buckets)) {
                        currentKey = keyv;
                        currentValue = hashtable.buckets[bucket].val;
                        current = true; 
                        return true;
                    } 
                } 
                current = false;
                return false; 
            }

            public virtual DictionaryEntry Entry {
                get {
                    // Valid only while positioned on an element.
                    if (!current) {
                        throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen));
                    }
                    return new DictionaryEntry(currentKey, currentValue);
                }
            }
 

            public virtual Object Current {
                get {
                    if (current == false) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen)); 

                    if (getObjectRetType==Keys) 
                        return currentKey; 
                    else if (getObjectRetType==Values)
                        return currentValue; 
                    else
                        return new DictionaryEntry(currentKey, currentValue);
                }
            } 

            public virtual Object Value {
                get {
                    // Valid only while positioned on an element.
                    if (!current) {
                        throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen));
                    }
                    return currentValue;
                }
            }

            public virtual void Reset() {
                // Fail fast if the table was modified since enumeration began.
                if (version != hashtable.version) {
                    throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumFailedVersion));
                }
                // Rewind to the pre-first position (MoveNext walks downward).
                current = false;
                bucket = hashtable.buckets.Length;
                currentKey = null;
                currentValue = null;
            }
        }

        // Debugger type proxy: presents the hashtable's contents to the
        // debugger as a flat array of key/value pairs.
        internal class HashtableDebugView {
            private readonly Hashtable _table;

            public HashtableDebugView( Hashtable hashtable) {
                if (hashtable == null) {
                    throw new ArgumentNullException( "hashtable");
                }
                _table = hashtable;
            }

            // Shown in place of the proxy object itself (RootHidden).
            [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
            public KeyValuePairs[] Items {
                get { return _table.ToKeyValuePairsArray(); }
            }
        }
    } 
 
    internal static class HashHelpers
    {
        // Table of prime numbers to use as hash table sizes.
        // The entry used for capacity is the smallest prime number in this array
        // that is larger than twice the previous capacity, keeping growth geometric.

        internal static readonly int[] primes = {
            3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
            1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
            17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
            187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
            1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369};

        // Returns whether candidate is a prime number, by trial division with
        // odd divisors up to sqrt(candidate).  Even candidates short-circuit
        // to the "is it exactly 2" check.
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
        internal static bool IsPrime(int candidate)
        {
            if ((candidate & 1) != 0)
            {
                // Bug fix: odd values below 3 (i.e. 1 and negative odd numbers)
                // previously fell through the empty divisor loop and were
                // reported prime; they are not.
                if (candidate < 3)
                    return false;
                int limit = (int)Math.Sqrt (candidate);
                for (int divisor = 3; divisor <= limit; divisor+=2)
                {
                    if ((candidate % divisor) == 0)
                        return false;
                }
                return true;
            }
            return (candidate == 2);
        }

        // Returns the smallest prime >= min: first from the precomputed table,
        // then (for very large requests) by scanning odd candidates upward.
        // Throws ArgumentException for negative min (capacity overflow).
        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
        internal static int GetPrime(int min)
        {
            if (min < 0)
                throw new ArgumentException(Environment.GetResourceString("Arg_HTCapacityOverflow"));

            for (int i = 0; i < primes.Length; i++)
            {
                int prime = primes[i];
                if (prime >= min) return prime;
            }

            // Outside of our predefined table: compute the hard way.
            // (min | 1) rounds min up to the nearest odd number, so only odd
            // candidates are tested.
            for (int i = (min | 1); i < Int32.MaxValue;i+=2)
            {
                if (IsPrime(i))
                    return i;
            }
            return min;
        }
    }
}

// File provided for Reference Use Only by Microsoft Corporation (c) 2007.
// Copyright (c) Microsoft Corporation. All rights reserved.
                        

Link Menu

Network programming in C#, Network Programming in VB.NET, Network Programming in .NET
This book is available now!
Buy at Amazon US or
Buy at Amazon UK