// Source (reference-source mirror path):
// Dotnetfx_Vista_SP2 / 8.0.50727.4016 / DEVDIV / depot / DevDiv / releases / whidbey / NetFxQFE /
// ndp / fx / src / Data / System / Data / Odbc / OdbcDataReader.cs
//------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation.  All rights reserved.
// [....]
// [....]
//------------------------------------------------------------------------------

using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Data.Common;
using System.Data.ProviderBase;
using System.Diagnostics;
using System.Globalization;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;              // StringBuilder
using System.Threading;

namespace System.Data.Odbc {

#if WINFSInternalOnly
    internal
#else
    public
#endif
    sealed class OdbcDataReader : DbDataReader {

        private OdbcCommand command;
        private int recordAffected = -1;
        private FieldNameLookup _fieldNameLookup;
        private DbCache dataCache;

        private enum HasRowsStatus {
            DontKnow = 0,
            HasRows = 1,
            HasNoRows = 2,
        }

        private HasRowsStatus _hasRows = HasRowsStatus.DontKnow;
        private bool _isClosed;
        private bool _isRead;
        private bool _isValidResult;
        private bool _noMoreResults;
        private bool _noMoreRows;
        private bool _skipReadOnce;
        private int _hiddenColumns;                 // number of hidden columns
        private CommandBehavior _commandBehavior;

        // track current row and column, will be set on the first Fetch call
        private int _row = -1;
        private int _column = -1;

        // used to track position in field for sucessive reads in case of Sequential Access
        private long _sequentialBytesRead;

        private static int _objectTypeCount; // Bid counter
        internal readonly int ObjectID = System.Threading.Interlocked.Increment(ref _objectTypeCount);

        // the statement handle here is just a copy of the statement handle owned by the command
        // the DataReader must not free the statement handle. this is done by the command
        //
        private MetaData[] metadata;
        private DataTable schemaTable;  // MDAC 68336
        private string _cmdText;        // get a copy in case the command text on the command is changed ...
        private CMDWrapper _cmdWrapper;

        internal OdbcDataReader(OdbcCommand command, CMDWrapper cmdWrapper, CommandBehavior commandbehavior) {
            OdbcConnection.VerifyExecutePermission();

            Debug.Assert(command != null, "Command null on OdbcDataReader ctor");
            this.command = command;
            _commandBehavior = commandbehavior;
            _cmdText = command.CommandText;     // get a copy in case the command text on the command is changed ...
            _cmdWrapper = cmdWrapper;
        }

        // Guarded access to the shared native buffer owned by the command wrapper.
        private CNativeBuffer Buffer {
            get {
                CNativeBuffer value = _cmdWrapper._dataReaderBuf;
                if (null == value) {
                    Debug.Assert(false, "object is disposed");
                    throw new ObjectDisposedException(GetType().Name);
                }
                return value;
            }
        }

        private OdbcConnection Connection {
            get {
                if (null != _cmdWrapper) {
                    return _cmdWrapper.Connection;
                }
                else {
                    return null;
                }
            }
        }

        internal OdbcCommand Command {
            get {
                return this.command;
            }
            set {
                this.command = value;
            }
        }

        private OdbcStatementHandle StatementHandle {
            get { return _cmdWrapper.StatementHandle; }
        }

        private OdbcStatementHandle KeyInfoStatementHandle {
            get { return _cmdWrapper.KeyInfoStatement; }
        }

        internal bool IsBehavior(CommandBehavior behavior) {
            return IsCommandBehavior(behavior);
        }

        internal bool IsCancelingCommand {
            get {
                if (this.command != null) {
                    return command.Canceling;
                }
                return false;
            }
        }

        internal bool IsNonCancelingCommand {
            get {
                if (this.command != null) {
                    return !command.Canceling;
                }
                return false;
            }
        }

        override public int Depth {
            get {
                if (IsClosed) { // MDAC 63669
                    throw ADP.DataReaderClosed("Depth");
                }
                return 0;
            }
        }

        override public int FieldCount {
            get {
                if (IsClosed) { // MDAC 63669
                    throw ADP.DataReaderClosed("FieldCount");
                }
                if (_noMoreResults) { // MDAC 93325
                    return 0;
                }
                if (null == this.dataCache) {
                    Int16 cColsAffected;
                    ODBC32.RetCode retcode = this.FieldCountNoThrow(out cColsAffected);
                    if (retcode != ODBC32.RetCode.SUCCESS) {
                        Connection.HandleError(StatementHandle, retcode);
                    }
                }
                return ((null != this.dataCache) ?
this.dataCache._count : 0); } } // HasRows // // Use to detect wheter there are one ore more rows in the result without going through Read // May be called at any time // Basically it calls Read and sets a flag so that the actual Read call will be skipped once // override public bool HasRows { get { if (IsClosed) { throw ADP.DataReaderClosed("HasRows"); } if (_hasRows == HasRowsStatus.DontKnow){ Read(); // _skipReadOnce = true; // need to skip Read once because we just did it } return (_hasRows == HasRowsStatus.HasRows); } } internal ODBC32.RetCode FieldCountNoThrow(out Int16 cColsAffected) { if (IsCancelingCommand) { cColsAffected = 0; return ODBC32.RetCode.ERROR; } ODBC32.RetCode retcode = StatementHandle.NumberOfResultColumns(out cColsAffected); if (retcode == ODBC32.RetCode.SUCCESS) { _hiddenColumns = 0; if (IsCommandBehavior(CommandBehavior.KeyInfo)) { // we need to search for the first hidden column // if (!Connection.ProviderInfo.NoSqlSoptSSNoBrowseTable && !Connection.ProviderInfo.NoSqlSoptSSHiddenColumns) { for (int i=0; i0) { // case 2 - fixed-size types have _bufferSize set to positive value // call GetValue(i) as before the fix of SQLBUVSTS01:110664 // note, when SQLGetData is called for a fixed length type, the buffer size is always ignored and // the data will always be read off the wire return Convert.IsDBNull(GetValue(i)); } else { // case 3 - the data has variable type, use GetData with 0 length // GetData will return false only if the object cached as DbNull // GetData will put DbNull in cache only if the SQLGetData returns SQL_NULL_DATA, otherwise it does not change it int dummy; return !GetData(i, GetSqlType(i)._sql_c, 0 /* zero buffer read, no data should be lost */, out dummy); } } override public Byte GetByte(int i) { return (Byte)internalGetByte(i); } private object internalGetByte(int i) { if (_isRead) { if(this.dataCache.AccessIndex(i) == null) { if (GetData(i, ODBC32.SQL_C.UTINYINT)) { this.dataCache[i] = Buffer.ReadByte(0); } } return 
this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public Char GetChar(int i) {
            return (Char)internalGetChar(i);
        }

        private object internalGetChar(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.WCHAR)) {
                        this.dataCache[i] = Buffer.ReadChar(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public Int16 GetInt16(int i) {
            return (Int16)internalGetInt16(i);
        }

        private object internalGetInt16(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.SSHORT)) {
                        this.dataCache[i] = Buffer.ReadInt16(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public Int32 GetInt32(int i) {
            return (Int32)internalGetInt32(i);
        }

        private object internalGetInt32(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.SLONG)) {
                        this.dataCache[i] = Buffer.ReadInt32(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public Int64 GetInt64(int i) {
            return (Int64)internalGetInt64(i);
        }

        // ---------------------------------------------------------------------------------------------
        //
        // internal internalGetInt64
        // -------------------------
        // Get Value of type SQL_BIGINT
        // Since the driver refused to accept the type SQL_BIGINT we read that
        // as SQL_C_WCHAR and convert it back to the Int64 data type
        //
        private object internalGetInt64(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.WCHAR)) {
                        string value = (string)Buffer.MarshalToManaged(0, ODBC32.SQL_C.WCHAR, ODBC32.SQL_NTS);
                        this.dataCache[i] = Int64.Parse(value, CultureInfo.InvariantCulture);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public bool GetBoolean(int i) {
            return (bool)internalGetBoolean(i);
        }

        private object internalGetBoolean(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.BIT)) {
                        this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.BIT, -1);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public float GetFloat(int i) {
            return (float)internalGetFloat(i);
        }

        private object internalGetFloat(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.REAL)) {
                        this.dataCache[i] = Buffer.ReadSingle(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        public DateTime GetDate(int i) {
            return (DateTime)internalGetDate(i);
        }

        private object internalGetDate(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.TYPE_DATE)) {
                        this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_DATE, -1);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public DateTime GetDateTime(int i) {
            return (DateTime)internalGetDateTime(i);
        }

        private object internalGetDateTime(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.TYPE_TIMESTAMP)) {
                        this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_TIMESTAMP, -1);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public decimal GetDecimal(int i) {
            return (decimal)internalGetDecimal(i);
        }

        // ---------------------------------------------------------------------------------------------
        //
        // internal GetDecimal
        // -------------------
        // Get Value of type SQL_DECIMAL or SQL_NUMERIC
        // Due to provider incompatibilities with SQL_DECIMAL or SQL_NUMERIC types we always read the value
        // as SQL_C_WCHAR and convert it back to the Decimal data type
        //
        private object internalGetDecimal(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.WCHAR)) {
                        string s = null;
                        try {
                            s = (string)Buffer.MarshalToManaged(0, ODBC32.SQL_C.WCHAR, ODBC32.SQL_NTS);
                            this.dataCache[i] = Decimal.Parse(s, System.Globalization.CultureInfo.InvariantCulture);
                        }
                        catch (OverflowException e) {
                            // cache the raw string so the caller can inspect the unconvertible value
                            this.dataCache[i] = s;
                            throw
e;
                        }
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public double GetDouble(int i) {
            return (double)internalGetDouble(i);
        }

        private object internalGetDouble(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.DOUBLE)) {
                        this.dataCache[i] = Buffer.ReadDouble(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public Guid GetGuid(int i) {
            return (Guid)internalGetGuid(i);
        }

        private object internalGetGuid(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.GUID)) {
                        this.dataCache[i] = Buffer.ReadGuid(0);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        override public String GetString(int i) {
            return (String)internalGetString(i);
        }

        private object internalGetString(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    // Obtain _ALL_ the characters
                    // Note: We can bind directly as WCHAR in ODBC and the DM will convert to and
                    // from ANSI if not supported by the driver.
                    //
                    // Note: The driver always returns the raw length of the data, minus the
                    // terminator. This means that our buffer length always includes the terminator
                    // charactor, so when determining which characters to count, and if more data
                    // exists, it should not take the terminator into effect.
                    //
                    CNativeBuffer buffer = Buffer;
                    // that does not make sense unless we expect four byte terminators
                    int cbMaxData = buffer.Length - 4;

                    // The first time GetData returns the true length (so we have to min it).
                    // We also pass in the true length to the marshal function since there could be
                    // embedded nulls
                    //
                    int lengthOrIndicator;
                    if (GetData(i, ODBC32.SQL_C.WCHAR, buffer.Length - 2, out lengthOrIndicator)) {
                        // RFC 50002644: we do not expect negative values from GetData call except SQL_NO_TOTAL(== -4)
                        // note that in general you should not trust third-party providers so such asserts should be
                        // followed by exception. I did not add it now to avoid breaking change
                        Debug.Assert(lengthOrIndicator >= 0 || lengthOrIndicator == ODBC32.SQL_NO_TOTAL, "unexpected lengthOrIndicator value");

                        if (lengthOrIndicator <= cbMaxData && (ODBC32.SQL_NO_TOTAL != lengthOrIndicator)) {
                            // all data read? good! Directly marshal to a string and we're done
                            //
                            string strdata = buffer.PtrToStringUni(0, Math.Min(lengthOrIndicator, cbMaxData) / 2);
                            this.dataCache[i] = strdata;
                            return strdata;
                        }

                        // We need to chunk the data
                        //
                        // Char[] buffer for the junks
                        // StringBuilder for the actual string
                        //
                        Char[] rgChars = new Char[cbMaxData / 2];

                        // RFC 50002644: negative value cannot be used for capacity.
                        // in case of SQL_NO_TOTAL, set the capacity to cbMaxData, StringBuilder will automatically reallocate
                        // its internal buffer when appending more data
                        int cbBuilderInitialCapacity = (lengthOrIndicator == ODBC32.SQL_NO_TOTAL) ? cbMaxData : lengthOrIndicator;
                        StringBuilder builder = new StringBuilder(cbBuilderInitialCapacity / 2);

                        bool gotData;
                        int cchJunk;
                        int cbActual = cbMaxData;
                        int cbMissing = (ODBC32.SQL_NO_TOTAL == lengthOrIndicator) ? -1 : lengthOrIndicator - cbActual;

                        do {
                            cchJunk = cbActual / 2;
                            buffer.ReadChars(0, rgChars, 0, cchJunk);
                            builder.Append(rgChars, 0, cchJunk);

                            if (0 == cbMissing) {
                                break;  // done
                            }

                            gotData = GetData(i, ODBC32.SQL_C.WCHAR, buffer.Length - 2, out lengthOrIndicator);
                            // RFC 50002644: we do not expect negative values from GetData call except SQL_NO_TOTAL(== -4)
                            // note that in general you should not trust third-party providers so such asserts should be
                            // followed by exception. I did not add it now to avoid breaking change
                            Debug.Assert(lengthOrIndicator >= 0 || lengthOrIndicator == ODBC32.SQL_NO_TOTAL, "unexpected lengthOrIndicator value");

                            if (ODBC32.SQL_NO_TOTAL != lengthOrIndicator) {
                                cbActual = Math.Min(lengthOrIndicator, cbMaxData);
                                if (0 < cbMissing) {
                                    cbMissing -= cbActual;
                                }
                                else {
                                    // it is a last call to SqlGetData that started with SQL_NO_TOTAL
                                    // the last call to SqlGetData must always return the length of the
                                    // data, not zero or SqlNoTotal (see Odbc Programmers Reference)
                                    Debug.Assert(cbMissing == -1 && lengthOrIndicator <= cbMaxData);
                                    cbMissing = 0;
                                }
                            }
                        } while (gotData);

                        this.dataCache[i] = builder.ToString();
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        public TimeSpan GetTime(int i) {
            return (TimeSpan)internalGetTime(i);
        }

        private object internalGetTime(int i) {
            if (_isRead) {
                if (this.dataCache.AccessIndex(i) == null) {
                    if (GetData(i, ODBC32.SQL_C.TYPE_TIME)) {
                        this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_TIME, -1);
                    }
                }
                return this.dataCache[i];
            }
            throw ADP.DataReaderNoData();
        }

        private void SetCurrentRowColumnInfo(int row, int column) {
            if (_row != row || _column != column) {
                _row = row;
                _column = column;

                // reset the blob reader when moved to new column
                _sequentialBytesRead = 0;
            }
        }

        override public long GetBytes(int i, long dataIndex, byte[] buffer, int bufferIndex, int length) {
            return GetBytesOrChars(i, dataIndex, buffer, false /* bytes buffer */, bufferIndex, length);
        }

        override public long GetChars(int i, long dataIndex, char[] buffer, int bufferIndex, int length) {
            return GetBytesOrChars(i, dataIndex, buffer, true /* chars buffer */, bufferIndex, length);
        }

        // unify the implementation of GetChars and GetBytes to prevent code duplicate
        private long GetBytesOrChars(int i, long dataIndex, Array buffer, bool isCharsBuffer, int bufferIndex, int length) {
            if (IsClosed) {
                throw ADP.DataReaderNoData();
            }
            if (!_isRead) {
                throw ADP.DataReaderNoData();
            }
            if (dataIndex < 0) { // test only for
// negative value here, Int32.MaxValue will be validated only in case of random access
                throw ADP.ArgumentOutOfRange("dataIndex");
            }
            if (bufferIndex < 0) {
                throw ADP.ArgumentOutOfRange("bufferIndex");
            }
            if (length < 0) {
                throw ADP.ArgumentOutOfRange("length");
            }

            string originalMethodName = isCharsBuffer ? "GetChars" : "GetBytes";

            // row/column info will be reset only if changed
            SetCurrentRowColumnInfo(_row, i);

            // Possible cases:
            // 1. random access, user asks for the value first time: bring it and cache the value
            // 2. random access, user already queried the value: use the cache
            // 3. sequential access, cache exists: user already read this value using different method (it becomes cached)
            //    use the cache - preserve the original behavior to minimize regression risk
            // 4. sequential access, no cache: (fixed now) user reads the bytes/chars in sequential order (no cache)

            object cachedObj = null;    // The cached object (if there is one)

            // Get cached object, ensure the correct type using explicit cast, to preserve same behavior as before
            if (isCharsBuffer)
                cachedObj = (string)this.dataCache[i];
            else
                cachedObj = (byte[])this.dataCache[i];

            bool isRandomAccess = !IsCommandBehavior(CommandBehavior.SequentialAccess);

            if (isRandomAccess || (cachedObj != null)) {
                // random access (cases 1 or 2) and sequential access with cache (case 3)
                // preserve the original behavior as before the fix
                if (Int32.MaxValue < dataIndex) {
                    // indices greater than allocable size are not supported in random access
                    // (negative value is already tested in the beginning of ths function)
                    throw ADP.ArgumentOutOfRange("dataIndex");
                }

                if (cachedObj == null) {
                    // case 1, get the value and cache it
                    // internalGetString/internalGetBytes will get the entire value and cache it,
                    // since we are not in SequentialAccess (isRandomAccess is true), it is OK
                    if (isCharsBuffer) {
                        cachedObj = (String)internalGetString(i);
                        Debug.Assert((cachedObj != null), "internalGetString should always return non-null or raise exception");
                    }
                    else {
                        cachedObj = (byte[])internalGetBytes(i);
                        Debug.Assert((cachedObj != null), "internalGetBytes should always return non-null or raise exception");
                    }
                    // continue to case 2
                }

                // after this point the value is cached (case 2 or 3)
                // if it is DbNull, cast exception will be raised (same as before the 110664 fix)
                int cachedObjectLength = isCharsBuffer ? ((string)cachedObj).Length : ((byte[])cachedObj).Length;

                // the user can ask for the length of the field by passing in a null pointer for the buffer
                if (buffer == null) {
                    // return the length if that's all what user needs
                    return cachedObjectLength;
                }

                // user asks for bytes
                if (length == 0) {
                    return 0; // Nothing to do ...
                }

                if (dataIndex >= cachedObjectLength) {
                    // no more bytes to read
                    // see also MDAC bug 73298
                    return 0;
                }

                int lengthFromDataIndex = cachedObjectLength - (int)dataIndex;
                int lengthOfCopy = Math.Min(lengthFromDataIndex, length);

                // silently reduce the length to avoid regression from EVERETT
                lengthOfCopy = Math.Min(lengthOfCopy, buffer.Length - bufferIndex);
                if (lengthOfCopy <= 0)
                    return 0; // MDAC Bug 73298

                if (isCharsBuffer)
                    ((string)cachedObj).CopyTo((int)dataIndex, (char[])buffer, bufferIndex, lengthOfCopy);
                else
                    Array.Copy((byte[])cachedObj, (int)dataIndex, (byte[])buffer, bufferIndex, lengthOfCopy);
                return lengthOfCopy;
            }
            else {
                // sequential access, case 4

                // SQLBU:532243 -- For SequentialAccess we need to read a chunk of
                // data and not cache it.
                // Note: If the object was previous cached (see case 3 above), the function will go thru 'if' path, to minimize
                // regressions

                // the user can ask for the length of the field by passing in a null pointer for the buffer
                if (buffer == null) {
                    // Get len. of remaining data from provider
                    if (isCharsBuffer)
                        return GetFieldLength(i, ODBC32.SQL_C.WCHAR);
                    else
                        return GetFieldLength(i, ODBC32.SQL_C.BINARY);
                }

                // check if user tries to read data that was already received
                // if yes, this violates 'sequential access'
                if ((isCharsBuffer && dataIndex < _sequentialBytesRead / 2) ||
                    (!isCharsBuffer && dataIndex < _sequentialBytesRead)) {
                    // backward reading is not allowed in sequential access
                    throw ADP.NonSeqByteAccess(dataIndex, _sequentialBytesRead, originalMethodName);
                }

                // note that it is actually not possible to read with an offset (dataIndex)
                // therefore, adjust the data index relative to number of bytes already read
                if (isCharsBuffer)
                    dataIndex -= _sequentialBytesRead / 2;
                else
                    dataIndex -= _sequentialBytesRead;

                if (dataIndex > 0) {
                    // user asked to skip bytes - it is OK, even in case of sequential access
                    // forward the stream by dataIndex bytes/chars
                    int charsOrBytesRead = readBytesOrCharsSequentialAccess(i, null, isCharsBuffer, 0, dataIndex);
                    if (charsOrBytesRead < dataIndex) {
                        // the stream ended before we forwarded to the requested index, stop now
                        return 0;
                    }
                }

                // ODBC driver now points to the correct position, start filling the user buffer from now

                // Make sure we don't overflow the user provided buffer
                // Note: SqlDataReader will raise exception if there is no enough room for length requested.
                // In case of ODBC, I decided to keep this consistent with random access after consulting with PM.
length = Math.Min(length, buffer.Length - bufferIndex);
                if (length <= 0)
                    return 0; // nothing to read

                // fill the user's buffer
                return readBytesOrCharsSequentialAccess(i, buffer, isCharsBuffer, bufferIndex, length);
            }
        }

        // fill the user's buffer (char[] or byte[], depending on isCharsBuffer)
        // if buffer is null, just skip the bytesOrCharsLength bytes or chars
        private int readBytesOrCharsSequentialAccess(int i, Array buffer, bool isCharsBuffer, int bufferIndex, long bytesOrCharsLength) {
            Debug.Assert(bufferIndex >= 0, "Negative buffer index");
            Debug.Assert(bytesOrCharsLength >= 0, "Negative number of bytes or chars to read"); // validated by the caller
            Debug.Assert(buffer == null || bytesOrCharsLength <= (buffer.Length - bufferIndex), "Not enough space in user's buffer");

            int totalBytesOrCharsRead = 0;
            string originalMethodName = isCharsBuffer ? "GetChars" : "GetBytes";

            // we need length in bytes, b/c that is what SQLGetData expects
            long cbLength = (isCharsBuffer) ? checked(bytesOrCharsLength * 2) : bytesOrCharsLength;

            // continue reading from the driver until we fill the user's buffer or until no more data is available
            // the data is pumped first into the internal native buffer and after that copied into the user's one if buffer is not null
            CNativeBuffer internalNativeBuffer = this.Buffer;

            // read the data in loop up to th user's length
            // if the data size is less than requested or in case of error, the while loop will stop in the middle
            while (cbLength > 0) {
                // max data to be read, in bytes, not including null-terminator for WCHARs
                int cbReadMax;

                // read from the driver
                bool isNotDbNull;
                int cbTotal;

                // read either bytes or chars, depending on the method called
                if (isCharsBuffer) {
                    // for WCHAR buffers, we need to leave space for null-terminator (2 bytes)
                    // reserve 2 bytes for null-terminator and 2 bytes to prevent assert in GetData
                    // if SQL_NO_TOTAL is returned, this ammount is read from the wire, in bytes
                    cbReadMax = (int)Math.Min(cbLength, internalNativeBuffer.Length - 4);
                    // SQLGetData will always append it - we do not to copy it to user's buffer
                    isNotDbNull = GetData(i, ODBC32.SQL_C.WCHAR, cbReadMax + 2, out cbTotal);
                }
                else {
                    // reserve 2 bytes to prevent assert in GetData
                    // when querying bytes, no need to reserve space for null
                    cbReadMax = (int)Math.Min(cbLength, internalNativeBuffer.Length - 2);
                    isNotDbNull = GetData(i, ODBC32.SQL_C.BINARY, cbReadMax, out cbTotal);
                }

                if (!isNotDbNull) {
                    // DbNull received, neither GetBytes nor GetChars should be used with DbNull value
                    // two options
                    // 1. be consistent with SqlDataReader, raise SqlNullValueException
                    // 2. be consistent with other Get* methods of OdbcDataReader and raise InvalidCastException
                    // after consulting with Himanshu (PM), decided to go with option 2 (raise cast exception)
                    // the below line will cause proper InvalidCastException, as expected, since DbNull is not convertible to byte[] or char[]
                    object dummy;
                    if (isCharsBuffer)
                        dummy = (string)this.dataCache[i];
                    else
                        dummy = (byte[])this.dataCache[i];
                    // execution will not go thru this point
                }

                int cbRead;     // will hold number of bytes read in this loop
                bool noDataRemained = false;
                if (cbTotal == 0) {
                    // no bytes read, stop
                    break;
                }
                else if (ODBC32.SQL_NO_TOTAL == cbTotal) {
                    // the driver has filled the internal buffer, but the length of remained data is still unknown
                    // we will continue looping until SQLGetData indicates the end of data or user buffer is fully filled
                    cbRead = cbReadMax;
                }
                else {
                    Debug.Assert((cbTotal > 0), "GetData returned negative value, which is not SQL_NO_TOTAL");
                    // GetData uses SQLGetData, which StrLen_or_IndPtr (cbTotal in our case) to the current buf + remained buf (if any)
                    if (cbTotal > cbReadMax) {
                        // in this case the amount of bytes/chars read will be the max requested (and more bytes can be read)
                        cbRead = cbReadMax;
                    }
                    else {
                        // SQLGetData read all the available data, no more remained
                        // continue processing this chunk and stop
                        cbRead = cbTotal;
                        noDataRemained =
true;
                    }
                }

                _sequentialBytesRead += cbRead;

                // update internal state and copy the data to user's buffer
                if (isCharsBuffer) {
                    int cchRead = cbRead / 2;
                    if (buffer != null) {
                        internalNativeBuffer.ReadChars(0, (char[])buffer, bufferIndex, cchRead);
                        bufferIndex += cchRead;
                    }
                    totalBytesOrCharsRead += cchRead;
                }
                else {
                    if (buffer != null) {
                        internalNativeBuffer.ReadBytes(0, (byte[])buffer, bufferIndex, cbRead);
                        bufferIndex += cbRead;
                    }
                    totalBytesOrCharsRead += cbRead;
                }
                cbLength -= cbRead;

                // stop if no data remained
                if (noDataRemained)
                    break;
            }
            return totalBytesOrCharsRead;
        }

        // Reads the entire binary value of column i into the cache (random access path).
        // Returns the cached value (a Byte[] or DBNull) for column i.
        private object internalGetBytes(int i) {
            if (this.dataCache.AccessIndex(i) == null) {
                // Obtain _ALL_ the bytes...
                // The first time GetData returns the true length (so we have to min it).
                Byte[] rgBytes;
                int cbBufferLen = Buffer.Length - 4;
                int cbActual;
                int cbOffset = 0;

                if (GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual)) {
                    CNativeBuffer buffer = Buffer;
                    if (ODBC32.SQL_NO_TOTAL != cbActual) {
                        rgBytes = new Byte[cbActual];
                        Buffer.ReadBytes(0, rgBytes, cbOffset, Math.Min(cbActual, cbBufferLen));

                        // Chunking. The data may be larger than our native buffer. In which case
                        // instead of growing the buffer (out of control), we will read in chunks to
                        // reduce memory footprint size.
                        while (cbActual > cbBufferLen) {
                            // The first time GetData returns the true length. Then successive calls
                            // return the remaining data.
                            bool flag = GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual);
                            Debug.Assert(flag, "internalGetBytes - unexpected invalid result inside if-block");
                            cbOffset += cbBufferLen;
                            buffer.ReadBytes(0, rgBytes, cbOffset, Math.Min(cbActual, cbBufferLen));
                        }
                    }
                    else {
                        // Total length unknown (SQL_NO_TOTAL): collect fixed-size chunks and
                        // concatenate at the end.
                        // FIX: the generic type arguments were stripped from this copy of the
                        // source ("List junkArray = new List ()"); restored as List<Byte[]>,
                        // the only type consistent with the Add/foreach usage below.
                        List<Byte[]> junkArray = new List<Byte[]>();
                        int junkSize;
                        int totalSize = 0;
                        do {
                            junkSize = (ODBC32.SQL_NO_TOTAL != cbActual) ? cbActual : cbBufferLen;
                            rgBytes = new Byte[junkSize];
                            totalSize += junkSize;
                            buffer.ReadBytes(0, rgBytes, 0, junkSize);
                            junkArray.Add(rgBytes);
                        } while ((ODBC32.SQL_NO_TOTAL == cbActual) && GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual));

                        rgBytes = new Byte[totalSize];
                        foreach (Byte[] junk in junkArray) {
                            junk.CopyTo(rgBytes, cbOffset);
                            cbOffset += junk.Length;
                        }
                    }
                    // always update the cache
                    this.dataCache[i] = rgBytes;
                }
            }
            return this.dataCache[i];
        }

        // GetColAttribute
        // ---------------
        // [IN] iColumn   ColumnNumber
        // [IN] v3FieldId FieldIdentifier of the attribute for version3 drivers (>=3.0)
        // [IN] v2FieldId FieldIdentifier of the attribute for version2 drivers (<3.0)
        //
        // returns the value of the FieldIdentifier field of the column
        // or -1 if the FieldIdentifier wasn't supported by the driver
        //
        private SQLLEN GetColAttribute(int iColumn, ODBC32.SQL_DESC v3FieldId, ODBC32.SQL_COLUMN v2FieldId, ODBC32.HANDLER handler) {
            Int16 cchNameLength = 0;
            SQLLEN numericAttribute;
            ODBC32.RetCode retcode;

            // protect against dead connection, dead or canceling command.
if ((Connection == null) || _cmdWrapper.Canceling) {
                return -1;
            }

            // Ordinals are 1:base in odbc
            OdbcStatementHandle stmt = StatementHandle;
            if (Connection.IsV3Driver) {
                retcode = stmt.ColumnAttribute(iColumn + 1, (short)v3FieldId, Buffer, out cchNameLength, out numericAttribute);
            }
            else if (v2FieldId != (ODBC32.SQL_COLUMN)(-1)) {
                retcode = stmt.ColumnAttribute(iColumn + 1, (short)v2FieldId, Buffer, out cchNameLength, out numericAttribute);
            }
            else {
                return 0;
            }
            if (retcode != ODBC32.RetCode.SUCCESS) {
                if (retcode == ODBC32.RetCode.ERROR) {
                    if ("HY091" == Command.GetDiagSqlState()) {
                        Connection.FlagUnsupportedColAttr(v3FieldId, v2FieldId);
                    }
                }
                if (handler == ODBC32.HANDLER.THROW) {
                    Connection.HandleError(stmt, retcode);
                }
                return -1;
            }
            return numericAttribute;
        }

        // GetColAttributeStr
        // ------------------
        // [IN] i         ColumnNumber
        // [IN] v3FieldId FieldIdentifier of the attribute for version3 drivers (>=3.0)
        // [IN] v2FieldId FieldIdentifier of the attribute for version2 drivers (<3.0)
        //
        // returns the stringvalue of the FieldIdentifier field of the column
        // or null if the string returned was empty or if the FieldIdentifier wasn't supported by the driver
        //
        private String GetColAttributeStr(int i, ODBC32.SQL_DESC v3FieldId, ODBC32.SQL_COLUMN v2FieldId, ODBC32.HANDLER handler) {
            ODBC32.RetCode retcode;
            Int16 cchNameLength = 0;
            SQLLEN numericAttribute;

            CNativeBuffer buffer = Buffer;
            buffer.WriteInt16(0, 0);
            OdbcStatementHandle stmt = StatementHandle;

            // protect against dead connection
            if (Connection == null || _cmdWrapper.Canceling || stmt == null) {
                return "";
            }

            if (Connection.IsV3Driver) {
                retcode = stmt.ColumnAttribute(i + 1, (short)v3FieldId, buffer, out cchNameLength, out numericAttribute);
            }
            else if (v2FieldId != (ODBC32.SQL_COLUMN)(-1)) {
                retcode = stmt.ColumnAttribute(i + 1, (short)v2FieldId, buffer, out cchNameLength, out numericAttribute);
            }
            else {
                return null;
            }
            if ((retcode != ODBC32.RetCode.SUCCESS) || (cchNameLength == 0)) {
                if (retcode == ODBC32.RetCode.ERROR) {
                    if ("HY091" == Command.GetDiagSqlState()) {
                        Connection.FlagUnsupportedColAttr(v3FieldId, v2FieldId);
                    }
                }
                if (handler == ODBC32.HANDLER.THROW) {
                    Connection.HandleError(stmt, retcode);
                }
                return null;
            }
            string retval = buffer.PtrToStringUni(0, cchNameLength / 2 /*cch*/);
            return retval;
        }

        // todo: Another 3.0 only attribute that is guaranteed to fail on V2 driver.
        // need to special case this for V2 drivers.
        //
        private String GetDescFieldStr(int i, ODBC32.SQL_DESC attribute, ODBC32.HANDLER handler) {
            Int32 numericAttribute = 0;

            // protect against dead connection, dead or canceling command.
            if ((Connection == null) || _cmdWrapper.Canceling) {
                return "";
            }

            // APP_PARAM_DESC is a (ODBCVER >= 0x0300) attribute
            if (!Connection.IsV3Driver) {
                Debug.Assert(false, "Non-V3 driver. Must not call GetDescFieldStr");
                return null;
            }

            ODBC32.RetCode retcode;
            CNativeBuffer buffer = Buffer;

            // Need to set the APP_PARAM_DESC values here
            using (OdbcDescriptorHandle hdesc = new OdbcDescriptorHandle(StatementHandle, ODBC32.SQL_ATTR.APP_PARAM_DESC)) {
                // SQLGetDescField
                retcode = hdesc.GetDescriptionField(i + 1, attribute, buffer, out numericAttribute);

                // Since there are many attributes (column, statement, etc), that may or may not be
                // supported, we don't want to throw (which obtains all errorinfo, marshals strings,
                // builds exceptions, etc), in common cases, unless we absolutly need this info...
                if ((retcode != ODBC32.RetCode.SUCCESS) || (numericAttribute == 0)) {
                    if (retcode == ODBC32.RetCode.ERROR) {
                        if ("HY091" == Command.GetDiagSqlState()) {
                            Connection.FlagUnsupportedColAttr(attribute, (ODBC32.SQL_COLUMN)0);
                        }
                    }
                    if (handler == ODBC32.HANDLER.THROW) {
                        Connection.HandleError(StatementHandle, retcode);
                    }
                    return null;
                }
            }
            string retval = buffer.PtrToStringUni(0, numericAttribute / 2 /*cch*/);
            return retval;
        }

        // GetFieldLength
        // This will return the correct number of items in the field.
//
private int GetFieldLength(int i, ODBC32.SQL_C sqlctype) {
    Debug.Assert(((sqlctype == ODBC32.SQL_C.BINARY)||(sqlctype == ODBC32.SQL_C.WCHAR)), "Illegal Type for GetFieldLength");
    if (IsCancelingCommand) throw ADP.DataReaderNoData();
    Debug.Assert (null != StatementHandle, "Statement handle is null in DateReader");
    IntPtr cbActual = IntPtr.Zero;
    int cb = 0;
    // For WCHAR a 2-byte buffer is passed so SQLGetData reports the remaining
    // length in cbActual; for BINARY a zero-length buffer is sufficient.
    if (sqlctype== ODBC32.SQL_C.WCHAR) {
        cb = 2;
    }
    //SQLGetData
    Debug.Assert((Buffer.Length>=cb),"Unexpected size of _buffer");
    CNativeBuffer buffer = Buffer;
    ODBC32.RetCode retcode = StatementHandle.GetData(
        (i+1), //Ordinals are 1:base in odbc
        sqlctype,
        buffer,
        cb,
        out cbActual);
    if(retcode != ODBC32.RetCode.SUCCESS && retcode != ODBC32.RetCode.SUCCESS_WITH_INFO && retcode != ODBC32.RetCode.NO_DATA)
        Connection.HandleError(StatementHandle, retcode);
    // WCHAR length comes back in bytes; report characters to the caller.
    if (sqlctype== ODBC32.SQL_C.WCHAR) {
        return (int)cbActual/2;
    }
    return (int)cbActual;
}

private bool GetData(int i, ODBC32.SQL_C sqlctype) {
    // Never call GetData with anything larger than _buffer.Length-2.
    // We keep reallocating native buffers and it kills performance!!!
    int dummy;
    return GetData(i, sqlctype, Buffer.Length - 4, out dummy);
}

/// <summary>
/// retrieves the data into this.Buffer.
/// * If the data is DbNull, the value be also cached and false is returned.
/// * if the data is not DbNull, the value is not cached and true is returned
/// </summary>
/// <returns>false if value is DbNull, true otherwise</returns>
private bool GetData(int i, ODBC32.SQL_C sqlctype, int cb, out int cbActualOut) {
    IntPtr cbActual = IntPtr.Zero; // Length or an indicator value
    if (IsCancelingCommand){
        throw ADP.DataReaderNoData();
    }
    Debug.Assert (null != StatementHandle, "Statement handle is null in DateReader");
    // Never call GetData with anything larger than _buffer.Length-2.
    // We keep reallocating native buffers and it kills performance!!!
    Debug.Assert(cb <= Buffer.Length-2, "GetData needs to Reallocate. Perf bug");
    // SQLGetData
    CNativeBuffer buffer = Buffer;
    ODBC32.RetCode retcode = StatementHandle.GetData(
        (i+1), // Column ordinals start at 1 in odbc
        sqlctype,
        buffer,
        cb,
        out cbActual);
    switch (retcode) {
    case ODBC32.RetCode.SUCCESS:
        break;
    case ODBC32.RetCode.SUCCESS_WITH_INFO:
        // SQL_NO_TOTAL: driver could not determine the remaining length; the
        // chunk itself is still valid.
        if ((Int32)cbActual == ODBC32.SQL_NO_TOTAL) {
            break;
        }
        // devnote: don't we want to fire an event?
        break;
    default:
        Connection.HandleError(StatementHandle, retcode);
        break;
    }
    // reset the current row and column
    SetCurrentRowColumnInfo(_row, i);
    // SQL_NULL_DATA
    if(cbActual == (IntPtr)ODBC32.SQL_NULL_DATA) {
        this.dataCache[i] = DBNull.Value;
        cbActualOut = 0;
        return false;
    }
    //Return the actual size (for chunking scenarios)
    cbActualOut = (int)cbActual;
    return true;
}

override public bool Read() {
    if (IsClosed) {
        throw ADP.DataReaderClosed("Read");
    }
    if (IsCancelingCommand) {
        _isRead = false;
        return false;
    }
    // HasRows needs to call into Read so we don't want to read on the actual Read call
    if (_skipReadOnce){
        _skipReadOnce = false;
        return _isRead;
    }
    if (_noMoreRows || _noMoreResults || IsCommandBehavior(CommandBehavior.SchemaOnly)) return false;
    if (!_isValidResult) {
        return false;
    }
    ODBC32.RetCode retcode;
    //SQLFetch is only valid to call for row returning queries
    //We get: [24000]Invalid cursor state. So we could either check the count
    //ahead of time (which is cached), or check on error and compare error states.
//Note: SQLFetch is also invalid to be called on a prepared (schemaonly) statement //SqlFetch retcode = StatementHandle.Fetch(); switch(retcode) { case ODBC32.RetCode.SUCCESS_WITH_INFO: Connection.HandleErrorNoThrow(StatementHandle, retcode); _hasRows = HasRowsStatus.HasRows; _isRead = true; break; case ODBC32.RetCode.SUCCESS: _hasRows = HasRowsStatus.HasRows; _isRead = true; break; case ODBC32.RetCode.NO_DATA: _isRead = false; if (_hasRows == HasRowsStatus.DontKnow) { _hasRows = HasRowsStatus.HasNoRows; } break; default: Connection.HandleError(StatementHandle, retcode); break; } //Null out previous cached row values. this.dataCache.FlushValues(); // if CommandBehavior == SingleRow we set _noMoreResults to true so that following reads will fail if (IsCommandBehavior(CommandBehavior.SingleRow)) { _noMoreRows = true; // no more rows, set to -1 SetCurrentRowColumnInfo(-1, 0); } else { // move to the next row SetCurrentRowColumnInfo(_row + 1, 0); } return _isRead; } // Called by odbccommand when executed for the first time internal void FirstResult() { Int16 cCols; SQLLEN cRowsAffected; cRowsAffected = GetRowCount(); // get rowcount of the current resultset (if any) CalculateRecordsAffected(cRowsAffected); // update recordsaffected ODBC32.RetCode retcode = FieldCountNoThrow(out cCols); if ((retcode == ODBC32.RetCode.SUCCESS) && (cCols == 0)) { NextResult(); } else { this._isValidResult = true; } } override public bool NextResult() { return NextResult(false, false); } private bool NextResult(bool disposing, bool allresults) { // if disposing, loop through all the remaining results and ignore error messages // if allresults, loop through all results and collect all error messages for a single exception // callers are via Close(false, true), Dispose(true, false), NextResult(false,false) Debug.Assert(!disposing || !allresults, "both disposing & allresults are true"); const int MaxConsecutiveFailure = 2000; // see WebData 72126 for why more than 1000 SQLLEN cRowsAffected; 
    Int16 cColsAffected;
    ODBC32.RetCode retcode, firstRetCode = ODBC32.RetCode.SUCCESS;
    bool hasMoreResults;
    bool hasColumns = false;
    bool singleResult = IsCommandBehavior(CommandBehavior.SingleResult);
    if (IsClosed) {
        throw ADP.DataReaderClosed("NextResult");
    }
    _fieldNameLookup = null;
    if (IsCancelingCommand || _noMoreResults) {
        return false;
    }
    //Blow away the previous cache (since the next result set will have a different shape,
    //different schema data, and different data.
    _isRead = false;
    _hasRows = HasRowsStatus.DontKnow;
    _fieldNameLookup = null;
    this.metadata = null;
    this.schemaTable = null;
    int loop = 0; // infinite loop protection, max out after 2000 consecutive failed results
    OdbcErrorCollection errors = null; // SQLBU 342112
    do {
        _isValidResult = false;
        retcode = StatementHandle.MoreResults();
        hasMoreResults = ((retcode == ODBC32.RetCode.SUCCESS)
                        ||(retcode == ODBC32.RetCode.SUCCESS_WITH_INFO));
        if (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO) {
            Connection.HandleErrorNoThrow(StatementHandle, retcode);
        }
        else if (!disposing && (retcode != ODBC32.RetCode.NO_DATA) && (ODBC32.RetCode.SUCCESS != retcode)) {
            // allow for building comulative error messages.
            if (null == errors) {
                firstRetCode = retcode;
                errors = new OdbcErrorCollection();
            }
            ODBC32.GetDiagErrors(errors, null, StatementHandle, retcode);
            ++loop;
        }
        if (!disposing && hasMoreResults) {
            loop = 0;
            cRowsAffected = GetRowCount(); // get rowcount of the current resultset (if any)
            CalculateRecordsAffected(cRowsAffected); // update recordsaffected
            if (!singleResult) {
                // update row- and columncount
                FieldCountNoThrow(out cColsAffected);
                hasColumns = (0 != cColsAffected);
                _isValidResult = hasColumns;
            }
        }
    } while ((!singleResult && hasMoreResults && !hasColumns) // repeat for results with no columns
             || ((ODBC32.RetCode.NO_DATA != retcode) && allresults && (loop < MaxConsecutiveFailure)) // or process all results until done
             || (singleResult && hasMoreResults)); // or for any result in singelResult mode
    if (MaxConsecutiveFailure <= loop) {
        // NOTE(review): the Bid.Trace format string looks truncated by the
        // scrape (the <...> prefix of Bid format strings would have been
        // stripped as an HTML tag) -- confirm against the original source.
        Bid.Trace("2000 consecutive failed results");
    }
    if(retcode == ODBC32.RetCode.NO_DATA) {
        this.dataCache = null;
        _noMoreResults = true;
    }
    if (null != errors) {
        Debug.Assert(!disposing, "errors while disposing");
        errors.SetSource(Connection.Driver);
        OdbcException exception = OdbcException.CreateException(errors, firstRetCode);
        Connection.ConnectionIsAlive(exception);
        throw exception;
    }
    return (hasMoreResults);
}

// Builds this.metadata (one MetaData per visible column) and, when KeyInfo
// behavior is requested, retrieves key/uniqueness information per base table.
private void BuildMetaDataInfo() {
    int count = FieldCount;
    MetaData[] metaInfos = new MetaData[count];
    List qrytables;
    bool needkeyinfo = IsCommandBehavior(CommandBehavior.KeyInfo);
    bool isKeyColumn;
    bool isHidden;
    ODBC32.SQL_NULLABILITY nullable;
    if (needkeyinfo)
        qrytables = new List ();
    else
        qrytables = null;
    // Find out all the metadata info, not all of this info will be available in all cases
    //
    // NOTE(review): generic type arguments (e.g. List<string>) and the entire
    // body of the per-column metadata loop appear to have been lost when this
    // file was scraped (text between "for(int i=0; i" and "0) )" is missing).
    // Recover the missing span from the original reference source.
    for(int i=0; i 0) ) {
        List .Enumerator tablesEnum = qrytables.GetEnumerator();
        QualifiedTableName qualifiedTableName = new QualifiedTableName(Connection.QuoteChar(ADP.GetSchemaTable));
        while(tablesEnum.MoveNext()) {
            // Find the primary keys, identity and autincrement columns
            qualifiedTableName.Table = tablesEnum.Current;
            if
            (RetrieveKeyInfo(needkeyinfo, qualifiedTableName, false) <= 0) {
                // Nothing found with the unquoted name; retry with quoting.
                RetrieveKeyInfo(needkeyinfo, qualifiedTableName, true);
            }
        }
    }
    else {
        // Some drivers ( < 3.x ?) do not provide base table information. In this case try to
        // find it by parsing the statement
        QualifiedTableName qualifiedTableName = new QualifiedTableName(Connection.QuoteChar(ADP.GetSchemaTable), GetTableNameFromCommandText());
        if (!ADP.IsEmpty(qualifiedTableName.Table)) { // fxcop
            SetBaseTableNames(qualifiedTableName);
            if (RetrieveKeyInfo(needkeyinfo, qualifiedTableName, false) <= 0) {
                RetrieveKeyInfo(needkeyinfo, qualifiedTableName, true);
            }
        }
    }
}
}

// Creates the empty SchemaTable skeleton (columns only) used by GetSchemaTable.
private DataTable NewSchemaTable() {
    DataTable schematable = new DataTable("SchemaTable");
    schematable.Locale = CultureInfo.InvariantCulture;
    schematable.MinimumCapacity = this.FieldCount;
    //Schema Columns
    DataColumnCollection columns = schematable.Columns;
    columns.Add(new DataColumn("ColumnName", typeof(System.String)));
    columns.Add(new DataColumn("ColumnOrdinal", typeof(System.Int32))); // UInt32
    columns.Add(new DataColumn("ColumnSize", typeof(System.Int32))); // UInt32
    columns.Add(new DataColumn("NumericPrecision", typeof(System.Int16))); // UInt16
    columns.Add(new DataColumn("NumericScale", typeof(System.Int16)));
    columns.Add(new DataColumn("DataType", typeof(System.Object)));
    columns.Add(new DataColumn("ProviderType", typeof(System.Int32)));
    columns.Add(new DataColumn("IsLong", typeof(System.Boolean)));
    columns.Add(new DataColumn("AllowDBNull", typeof(System.Boolean)));
    columns.Add(new DataColumn("IsReadOnly", typeof(System.Boolean)));
    columns.Add(new DataColumn("IsRowVersion", typeof(System.Boolean)));
    columns.Add(new DataColumn("IsUnique", typeof(System.Boolean)));
    columns.Add(new DataColumn("IsKey", typeof(System.Boolean)));
    columns.Add(new DataColumn("IsAutoIncrement", typeof(System.Boolean)));
    columns.Add(new DataColumn("BaseSchemaName", typeof(System.String)));
    columns.Add(new DataColumn("BaseCatalogName", typeof(System.String)));
    columns.Add(new DataColumn("BaseTableName", typeof(System.String)));
    columns.Add(new DataColumn("BaseColumnName", typeof(System.String)));
    // MDAC Bug 79231
    foreach (DataColumn column in columns) {
        column.ReadOnly = true;
    }
    return schematable;
}

// The default values are already defined in DbSchemaRows (see DbSchemaRows.cs) so there is no need to set any default value
//
override public DataTable GetSchemaTable() {
    if (IsClosed) { // MDAC 68331
        throw ADP.DataReaderClosed("GetSchemaTable"); // can't use closed connection
    }
    if (_noMoreResults) {
        return null; // no more results
    }
    if (null != this.schemaTable) {
        return this.schemaTable; // return cached schematable
    }
    //Delegate, to have the base class setup the structure
    DataTable schematable = NewSchemaTable();
    if (FieldCount == 0) {
        return schematable;
    }
    if (this.metadata == null) {
        BuildMetaDataInfo();
    }
    DataColumn columnName = schematable.Columns["ColumnName"];
    DataColumn columnOrdinal = schematable.Columns["ColumnOrdinal"];
    DataColumn columnSize = schematable.Columns["ColumnSize"];
    DataColumn numericPrecision = schematable.Columns["NumericPrecision"];
    DataColumn numericScale = schematable.Columns["NumericScale"];
    DataColumn dataType = schematable.Columns["DataType"];
    DataColumn providerType = schematable.Columns["ProviderType"];
    DataColumn isLong = schematable.Columns["IsLong"];
    DataColumn allowDBNull = schematable.Columns["AllowDBNull"];
    DataColumn isReadOnly = schematable.Columns["IsReadOnly"];
    DataColumn isRowVersion = schematable.Columns["IsRowVersion"];
    DataColumn isUnique = schematable.Columns["IsUnique"];
    DataColumn isKey = schematable.Columns["IsKey"];
    DataColumn isAutoIncrement = schematable.Columns["IsAutoIncrement"];
    DataColumn baseSchemaName = schematable.Columns["BaseSchemaName"];
    DataColumn baseCatalogName = schematable.Columns["BaseCatalogName"];
    DataColumn baseTableName = schematable.Columns["BaseTableName"];
    DataColumn baseColumnName = schematable.Columns["BaseColumnName"];
    //Populate the rows (1 row for each
    // column)
    int count = FieldCount;
    // NOTE(review): a large span was lost when this file was scraped -- the
    // row-population loop of GetSchemaTable and the head of RetrieveKeyInfo
    // (its signature and local declarations) are missing; the text resumes
    // mid-Debug.Assert below. Recover from the original reference source.
    for(int i=0; i = 264, "Native buffer to small (_buffer.Length < 264)");
    RuntimeHelpers.PrepareConstrainedRegions();
    try {
        buffer.DangerousAddRef(ref mustRelease);
        if (needkeyinfo) {
            if (!Connection.ProviderInfo.NoSqlPrimaryKeys) {
                // Get the primary keys
                retcode = KeyInfoStatementHandle.PrimaryKeys(
                    qualifiedTableName.Catalog,
                    qualifiedTableName.Schema,
                    qualifiedTableName.GetTable(quoted));
                if ((retcode == ODBC32.RetCode.SUCCESS) || (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO)) {
                    bool noUniqueKey = false;
                    // We are only interested in column name
                    buffer.WriteInt16(0, 0);
                    retcode = KeyInfoStatementHandle.BindColumn2(
                        (short)(ODBC32.SQL_PRIMARYKEYS.COLUMNNAME), // Column Number
                        ODBC32.SQL_C.WCHAR,
                        buffer.PtrOffset(0, 256),
                        (IntPtr)256,
                        buffer.PtrOffset(256, IntPtr.Size).Handle);
                    while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) {
                        cbActual = buffer.ReadIntPtr(256);
                        columnname = buffer.PtrToStringUni(0, (int)cbActual/2/*cch*/);
                        ordinal = this.GetOrdinalFromBaseColName(columnname);
                        if (ordinal != -1) {
                            keyColumns ++;
                            this.metadata[ordinal].isKeyColumn = true;
                            this.metadata[ordinal].isUnique = true;
                            this.metadata[ordinal].isNullable = false;
                            this.metadata[ordinal].baseTableName = qualifiedTableName.Table;
                            if (this.metadata[ordinal].baseColumnName == null) {
                                this.metadata[ordinal].baseColumnName = columnname;
                            }
                        }
                        else {
                            noUniqueKey = true;
                            break; // no need to go over the remaining columns anymore
                        }
                    }
                    //
                    // if we got keyinfo from the column we dont even get to here!
                    //
                    // reset isUnique flag if the key(s) are not unique
                    //
                    if (noUniqueKey) {
                        foreach (MetaData metadata in this.metadata) {
                            metadata.isKeyColumn = false;
                        }
                    }
                    // Unbind the column
                    retcode = KeyInfoStatementHandle.BindColumn3(
                        (short)(ODBC32.SQL_PRIMARYKEYS.COLUMNNAME), // SQLUSMALLINT ColumnNumber
                        ODBC32.SQL_C.WCHAR, // SQLSMALLINT TargetType
                        buffer.DangerousGetHandle()); // SQLLEN * StrLen_or_Ind
                }
                else {
                    // SQLSTATE IM001 = driver does not support SQLPrimaryKeys;
                    // remember that so we go straight to SQLStatistics next time.
                    if ("IM001" == Command.GetDiagSqlState()) {
                        Connection.ProviderInfo.NoSqlPrimaryKeys = true;
                    }
                }
            }
            if (keyColumns == 0) {
                // SQLPrimaryKeys did not work. Have to use the slower SQLStatistics to obtain key information
                KeyInfoStatementHandle.MoreResults();
                keyColumns += RetrieveKeyInfoFromStatistics(qualifiedTableName, quoted);
            }
            KeyInfoStatementHandle.MoreResults();
        }
        // Get the special columns for version
        retcode = KeyInfoStatementHandle.SpecialColumns(qualifiedTableName.GetTable(quoted));
        if ((retcode == ODBC32.RetCode.SUCCESS) || (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO)) {
            // We are only interested in column name
            cbActual = IntPtr.Zero;
            buffer.WriteInt16(0, 0);
            retcode = KeyInfoStatementHandle.BindColumn2(
                (short)(ODBC32.SQL_SPECIALCOLUMNSET.COLUMN_NAME),
                ODBC32.SQL_C.WCHAR,
                buffer.PtrOffset(0, 256),
                (IntPtr)256,
                buffer.PtrOffset(256, IntPtr.Size).Handle);
            while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) {
                cbActual = buffer.ReadIntPtr(256);
                columnname = buffer.PtrToStringUni(0, (int)cbActual/2/*cch*/);
                ordinal = this.GetOrdinalFromBaseColName(columnname);
                if (ordinal != -1) {
                    this.metadata[ordinal].isRowVersion = true;
                    if (this.metadata[ordinal].baseColumnName == null) {
                        this.metadata[ordinal].baseColumnName = columnname;
                    }
                }
            }
            // Unbind the column
            retcode = KeyInfoStatementHandle.BindColumn3(
                (short)(ODBC32.SQL_SPECIALCOLUMNSET.COLUMN_NAME),
                ODBC32.SQL_C.WCHAR,
                buffer.DangerousGetHandle());
            retcode = KeyInfoStatementHandle.MoreResults();
        }
        else {
            // i've seen "DIAG [HY000] [Microsoft][ODBC SQL Server Driver]Connection is busy
            // with results for another hstmt (0) "
            // how did we get here? SqlServer does not allow a second handle (Keyinfostmt) anyway...
            //
            /*
            string msg = "Unexpected failure of SQLSpecialColumns. Code = " + Command.GetDiagSqlState();
            Debug.Assert (false, msg);
            */
        }
    }
    finally {
        if (mustRelease) {
            buffer.DangerousRelease();
        }
    }
    return keyColumns;
}

// Uses SQLStatistics to retrieve key information for a table
// Picks the unique index with the fewest columns whose columns are all present
// in the resultset and marks those columns as key columns; returns the number
// of key columns found.
private int RetrieveKeyInfoFromStatistics(QualifiedTableName qualifiedTableName, bool quoted) {
    ODBC32.RetCode retcode;
    String columnname = String.Empty;
    String indexname = String.Empty;
    String currentindexname = String.Empty;
    int[] indexcolumnordinals = new int[16];
    int[] pkcolumnordinals = new int[16];
    int npkcols = 0;
    int ncols = 0; // No of cols in the index
    bool partialcolumnset = false;
    int ordinal;
    int indexordinal;
    IntPtr cbIndexLen = IntPtr.Zero;
    IntPtr cbColnameLen = IntPtr.Zero;
    int keyColumns = 0;
    // devnote: this test is already done by calling method ...
    // if (IsClosed) return; // protect against dead connection
    // MDAC Bug 75928 - SQLStatisticsW damages the string passed in
    // To protect the tablename we need to pass in a copy of that string
    String tablename1 = String.Copy(qualifiedTableName.GetTable(quoted));
    // Select only unique indexes
    retcode = KeyInfoStatementHandle.Statistics(tablename1);
    if (retcode != ODBC32.RetCode.SUCCESS) {
        // We give up at this point
        return 0;
    }
    CNativeBuffer buffer = Buffer;
    bool mustRelease = false;
    Debug.Assert(buffer.Length >= 544, "Native buffer to small (_buffer.Length < 544)");
    RuntimeHelpers.PrepareConstrainedRegions();
    try {
        buffer.DangerousAddRef(ref mustRelease);
        // Fixed layout inside the shared native buffer: two 256-byte string
        // buffers, a 2-byte ordinal, then three pointer-sized length slots.
        const int colnameBufOffset = 0;
        const int indexBufOffset = 256;
        const int ordinalBufOffset = 512;
        const int colnameActualOffset = 520;
        const int indexActualOffset = 528;
        const int ordinalActualOffset = 536;
        HandleRef colnamebuf = buffer.PtrOffset(colnameBufOffset, 256);
        HandleRef indexbuf = buffer.PtrOffset(indexBufOffset, 256);
        HandleRef ordinalbuf = buffer.PtrOffset(ordinalBufOffset, 4);
        IntPtr colnameActual = buffer.PtrOffset(colnameActualOffset, IntPtr.Size).Handle;
        IntPtr indexActual = buffer.PtrOffset(indexActualOffset, IntPtr.Size).Handle;
        IntPtr ordinalActual = buffer.PtrOffset(ordinalActualOffset, IntPtr.Size).Handle;
        //We are interested in index name, column name, and ordinal
        buffer.WriteInt16(indexBufOffset, 0);
        retcode = KeyInfoStatementHandle.BindColumn2(
            (short)(ODBC32.SQL_STATISTICS.INDEXNAME),
            ODBC32.SQL_C.WCHAR,
            indexbuf,
            (IntPtr)256,
            indexActual);
        retcode = KeyInfoStatementHandle.BindColumn2(
            (short)(ODBC32.SQL_STATISTICS.ORDINAL_POSITION),
            ODBC32.SQL_C.SSHORT,
            ordinalbuf,
            (IntPtr)4,
            ordinalActual);
        buffer.WriteInt16(ordinalBufOffset, 0);
        retcode = KeyInfoStatementHandle.BindColumn2(
            (short)(ODBC32.SQL_STATISTICS.COLUMN_NAME),
            ODBC32.SQL_C.WCHAR,
            colnamebuf,
            (IntPtr)256,
            colnameActual);
        // Find the best unique index on the table, use the ones whose columns are
        // completely covered by the query.
        while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) {
            cbColnameLen = buffer.ReadIntPtr(colnameActualOffset);
            cbIndexLen = buffer.ReadIntPtr(indexActualOffset);
            // If indexname is not returned, skip this row
            if (0 == buffer.ReadInt16(indexBufOffset))
                continue; // Not an index row, get next row.
            columnname = buffer.PtrToStringUni(colnameBufOffset, (int)cbColnameLen/2/*cch*/);
            indexname = buffer.PtrToStringUni(indexBufOffset, (int)cbIndexLen/2/*cch*/);
            ordinal = (int) buffer.ReadInt16(ordinalBufOffset);
            if (SameIndexColumn(currentindexname, indexname, ordinal, ncols)) {
                // We are still working on the same index
                if (partialcolumnset)
                    continue; // We don't have all the keys for this index, so we can't use it
                ordinal = this.GetOrdinalFromBaseColName(columnname, qualifiedTableName.Table);
                if (ordinal == -1) {
                    partialcolumnset = true;
                }
                else {
                    // Add the column to the index column set
                    if (ncols < 16)
                        indexcolumnordinals[ncols++] = ordinal;
                    else // Can't deal with indexes > 16 columns
                        partialcolumnset = true;
                }
            }
            else {
                // We got a new index, save the previous index information
                if (!partialcolumnset && (ncols != 0)) {
                    // Choose the unique index with least columns as primary key
                    if ((npkcols == 0) || (npkcols > ncols)){
                        npkcols = ncols;
                        for (int i = 0 ; i < ncols ; i++)
                            pkcolumnordinals[i] = indexcolumnordinals[i];
                    }
                }
                // Reset the parameters for a new index
                ncols = 0;
                currentindexname = indexname;
                partialcolumnset = false;
                // Add this column to index
                ordinal = this.GetOrdinalFromBaseColName(columnname, qualifiedTableName.Table);
                if (ordinal == -1) {
                    partialcolumnset = true;
                }
                else {
                    // Add the column to the index column set
                    indexcolumnordinals[ncols++] = ordinal;
                }
            }
            // Go on to the next column
        }
        // Do we have an index?
        if (!partialcolumnset && (ncols != 0)) {
            // Choose the unique index with least columns as primary key
            if ((npkcols == 0) || (npkcols > ncols)){
                npkcols = ncols;
                for (int i = 0 ; i < ncols ; i++)
                    pkcolumnordinals[i] = indexcolumnordinals[i];
            }
        }
        // Mark the chosen index as primary key
        if (npkcols != 0) {
            for (int i = 0 ; i < npkcols ; i++) {
                indexordinal = pkcolumnordinals[i];
                keyColumns++;
                this.metadata[indexordinal].isKeyColumn = true;
                // should we set isNullable = false?
// This makes the QuikTest against Jet fail // // test test test - we don't know if this is nulalble or not so why do we want to set it to a value? this.metadata[indexordinal].isNullable = false; this.metadata[indexordinal].isUnique = true; if (this.metadata[indexordinal].baseTableName == null) { this.metadata[indexordinal].baseTableName = qualifiedTableName.Table; } if (this.metadata[indexordinal].baseColumnName == null) { this.metadata[indexordinal].baseColumnName = columnname; } } } // Unbind the columns _cmdWrapper.FreeKeyInfoStatementHandle(ODBC32.STMT.UNBIND); } finally { if (mustRelease) { buffer.DangerousRelease(); } } return keyColumns; } internal bool SameIndexColumn(String currentindexname, String indexname, int ordinal, int ncols) { if (ADP.IsEmpty(currentindexname)){ return false; } if ((currentindexname == indexname) && (ordinal == ncols+1)) return true; return false; } internal int GetOrdinalFromBaseColName(String columnname) { return GetOrdinalFromBaseColName(columnname, null); } internal int GetOrdinalFromBaseColName(String columnname, String tablename) { if (ADP.IsEmpty(columnname)) { return -1; } if (this.metadata != null) { int count = FieldCount; for (int i = 0 ; i < count ; i++) { if ( (this.metadata[i].baseColumnName != null) && (columnname == this.metadata[i].baseColumnName)) { if (!ADP.IsEmpty(tablename)) { if (tablename == this.metadata[i].baseTableName) { return i; } // end if } // end if else { return i; } // end else } } } // We can't find it in base column names, try regular colnames return this.IndexOf(columnname); } // We try parsing the SQL statement to get the table name as a last resort when // the driver doesn't return this information back to us. 
// // we can't handle multiple tablenames (JOIN) // only the first tablename will be returned internal string GetTableNameFromCommandText() { if (command == null){ return null; } String localcmdtext = _cmdText; if (ADP.IsEmpty(localcmdtext)) { // fxcop return null; } String tablename; int idx; CStringTokenizer tokenstmt = new CStringTokenizer(localcmdtext, Connection.QuoteChar(ADP.GetSchemaTable)[0], Connection.EscapeChar(ADP.GetSchemaTable)); if (tokenstmt.StartsWith("select") == true) { // select command, search for from clause idx = tokenstmt.FindTokenIndex("from"); } else { if ((tokenstmt.StartsWith("insert") == true) || (tokenstmt.StartsWith("update") == true) || (tokenstmt.StartsWith("delete") == true) ) { // Get the following word idx = tokenstmt.CurrentPosition; } else idx = -1; } if (idx == -1) return null; // The next token is the table name tablename = tokenstmt.NextToken(); localcmdtext = tokenstmt.NextToken(); if ((localcmdtext.Length > 0) && (localcmdtext[0] == ',')) { return null; // can't handle multiple tables } if ((localcmdtext.Length == 2) && ((localcmdtext[0] == 'a') || (localcmdtext[0] == 'A')) && ((localcmdtext[1] == 's') || (localcmdtext[1] == 'S'))) { // aliased table, skip the alias name localcmdtext = tokenstmt.NextToken(); localcmdtext = tokenstmt.NextToken(); if ((localcmdtext.Length > 0) && (localcmdtext[0] == ',')) { return null; // Multiple tables } } return tablename; } internal void SetBaseTableNames(QualifiedTableName qualifiedTableName) { int count = FieldCount; for(int i=0; i 0)) { char quoteChar = _quoteChar[0]; if (str[0] == quoteChar) { Debug.Assert (str.Length > 1, "Illegal string, only one char that is a quote"); Debug.Assert (str[str.Length-1] == quoteChar, "Missing quote at end of string that begins with quote"); if (str.Length > 1 && str[str.Length-1] == quoteChar) { str = str.Substring(1, str.Length-2); } } } return str; } } sealed private class MetaData { internal int ordinal; internal TypeMap typemap; internal SQLLEN 
    size;
    internal byte precision;
    internal byte scale;
    internal bool isAutoIncrement;
    internal bool isUnique;
    internal bool isReadOnly;
    internal bool isNullable;
    internal bool isRowVersion;
    internal bool isLong;
    internal bool isKeyColumn;
    internal string baseSchemaName;
    internal string baseCatalogName;
    internal string baseTableName;
    internal string baseColumnName;
}
}
}
// File provided for Reference Use Only by Microsoft Corporation (c) 2007.
// NOTE(review): the scraped file restarts here -- everything below duplicates
// the top of OdbcDataReader.cs; remove the duplicate when restoring the file
// from the original reference source.
//------------------------------------------------------------------------------
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
//[....]
//[....]
//-----------------------------------------------------------------------------
using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Data.Common;
using System.Data.ProviderBase;
using System.Diagnostics;
using System.Globalization;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text; // StringBuilder
using System.Threading;

namespace System.Data.Odbc {
#if WINFSInternalOnly
    internal
#else
    public
#endif
    sealed class OdbcDataReader : DbDataReader {

        private OdbcCommand command;
        private int recordAffected = -1;
        private FieldNameLookup _fieldNameLookup;
        private DbCache dataCache;
        // Tri-state so HasRows can remember whether a probing Read already ran.
        private enum HasRowsStatus {
            DontKnow = 0,
            HasRows = 1,
            HasNoRows = 2,
        }
        private HasRowsStatus _hasRows = HasRowsStatus.DontKnow;
        private bool _isClosed;
        private bool _isRead;
        private bool _isValidResult;
        private bool _noMoreResults;
        private bool _noMoreRows;
        private bool _skipReadOnce;
        private int _hiddenColumns; // number of hidden columns
        private CommandBehavior _commandBehavior;
        // track current row and column, will be set on the first Fetch call
        private int _row = -1;
        private int _column = -1;
        // used to track position in field for sucessive reads in case of Sequential Access
        private long _sequentialBytesRead;
        private static int _objectTypeCount; // Bid counter
        internal readonly int ObjectID = System.Threading.Interlocked.Increment(ref _objectTypeCount);
        // the statement handle here is just a copy of the statement handle owned by the command
        // the DataReader must not free the statement handle. this is done by the command
        //
        private MetaData[] metadata;
        private DataTable schemaTable; // MDAC 68336
        private string _cmdText; // get a copy in case the command text on the command is changed ...
        private CMDWrapper _cmdWrapper;

        internal OdbcDataReader(OdbcCommand command, CMDWrapper cmdWrapper, CommandBehavior commandbehavior) {
            OdbcConnection.VerifyExecutePermission();
            Debug.Assert(command != null, "Command null on OdbcDataReader ctor");
            this.command = command;
            _commandBehavior = commandbehavior;
            _cmdText = command.CommandText; // get a copy in case the command text on the command is changed ...
            _cmdWrapper = cmdWrapper;
        }

        // Shared native scratch buffer owned by the command wrapper; throws
        // ObjectDisposedException once the wrapper has released it.
        private CNativeBuffer Buffer {
            get {
                CNativeBuffer value = _cmdWrapper._dataReaderBuf;
                if (null == value) {
                    Debug.Assert(false, "object is disposed");
                    throw new ObjectDisposedException(GetType().Name);
                }
                return value;
            }
        }

        private OdbcConnection Connection {
            get {
                if (null != _cmdWrapper) {
                    return _cmdWrapper.Connection;
                }
                else {
                    return null;
                }
            }
        }

        internal OdbcCommand Command {
            get {
                return this.command;
            }
            set {
                this.command=value;
            }
        }

        private OdbcStatementHandle StatementHandle {
            get { return _cmdWrapper.StatementHandle; }
        }

        private OdbcStatementHandle KeyInfoStatementHandle {
            get { return _cmdWrapper.KeyInfoStatement; }
        }

        internal bool IsBehavior(CommandBehavior behavior) {
            return IsCommandBehavior(behavior);
        }

        internal bool IsCancelingCommand {
            get {
                if (this.command != null) {
                    return command.Canceling;
                }
                return false;
            }
        }

        internal bool IsNonCancelingCommand {
            get {
                if (this.command != null) {
                    return !command.Canceling;
                }
                return false;
            }
        }

        override public int Depth {
            get {
                if (IsClosed) { // MDAC 63669
                    throw ADP.DataReaderClosed("Depth");
                }
                // ODBC resultsets are never nested.
                return 0;
            }
        }

        override public int FieldCount {
            get {
                if (IsClosed)
{ // MDAC 63669
    throw ADP.DataReaderClosed("FieldCount");
}
if (_noMoreResults) { // MDAC 93325
    return 0;
}
if (null == this.dataCache) {
    Int16 cColsAffected;
    ODBC32.RetCode retcode = this.FieldCountNoThrow(out cColsAffected);
    if(retcode != ODBC32.RetCode.SUCCESS) {
        Connection.HandleError(StatementHandle, retcode);
    }
}
return ((null != this.dataCache) ? this.dataCache._count : 0);
}
}

// HasRows
//
// Use to detect wheter there are one ore more rows in the result without going through Read
// May be called at any time
// Basically it calls Read and sets a flag so that the actual Read call will be skipped once
//
override public bool HasRows {
    get {
        if (IsClosed) {
            throw ADP.DataReaderClosed("HasRows");
        }
        if (_hasRows == HasRowsStatus.DontKnow){
            Read(); //
            _skipReadOnce = true; // need to skip Read once because we just did it
        }
        return (_hasRows == HasRowsStatus.HasRows);
    }
}

// Like SQLNumResultCols but reports failure through the return code rather
// than throwing; also probes for SQL Server hidden (browse) columns when
// KeyInfo behavior is requested.
internal ODBC32.RetCode FieldCountNoThrow(out Int16 cColsAffected) {
    if (IsCancelingCommand) {
        cColsAffected = 0;
        return ODBC32.RetCode.ERROR;
    }
    ODBC32.RetCode retcode = StatementHandle.NumberOfResultColumns(out cColsAffected);
    if (retcode == ODBC32.RetCode.SUCCESS) {
        _hiddenColumns = 0;
        if (IsCommandBehavior(CommandBehavior.KeyInfo)) {
            // we need to search for the first hidden column
            //
            if (!Connection.ProviderInfo.NoSqlSoptSSNoBrowseTable && !Connection.ProviderInfo.NoSqlSoptSSHiddenColumns) {
                // NOTE(review): a large span was lost when this file was
                // scraped -- the hidden-column loop body, the remainder of
                // FieldCountNoThrow, and several members up to the middle of
                // what appears to be IsDBNull are missing between
                // "for (int i=0; i" and "0) {" below. Recover the missing span
                // from the original reference source.
                for (int i=0; i0) {
                    // case 2 - fixed-size types have _bufferSize set to positive value
                    // call GetValue(i) as before the fix of SQLBUVSTS01:110664
                    // note, when SQLGetData is called for a fixed length type, the buffer size is always ignored and
                    // the data will always be read off the wire
                    return Convert.IsDBNull(GetValue(i));
                }
                else {
                    // case 3 - the data has variable type, use GetData with 0 length
                    // GetData will return false only if the object cached as DbNull
                    // GetData will put DbNull in cache only if the SQLGetData returns SQL_NULL_DATA, otherwise it does not change it
                    int dummy;
                    return
!GetData(i, GetSqlType(i)._sql_c, 0 /* zero buffer read, no data should be lost */, out dummy);
                }
            }

override public Byte GetByte(int i) {
    return (Byte)internalGetByte(i);
}
// Reads the column as SQL_C_UTINYINT and caches the value for repeated access.
private object internalGetByte(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if (GetData(i, ODBC32.SQL_C.UTINYINT)) {
                this.dataCache[i] = Buffer.ReadByte(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public Char GetChar(int i) {
    return (Char)internalGetChar(i);
}
private object internalGetChar(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if (GetData(i, ODBC32.SQL_C.WCHAR)) {
                this.dataCache[i] = Buffer.ReadChar(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public Int16 GetInt16(int i) {
    return (Int16)internalGetInt16(i);
}
private object internalGetInt16(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if (GetData(i, ODBC32.SQL_C.SSHORT)) {
                this.dataCache[i] = Buffer.ReadInt16(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public Int32 GetInt32(int i) {
    return (Int32)internalGetInt32(i);
}
private object internalGetInt32(int i){
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.SLONG)){
                this.dataCache[i] = Buffer.ReadInt32(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public Int64 GetInt64(int i) {
    return (Int64)internalGetInt64(i);
}
// ---------------------------------------------------------------------------------------------
//
// internal internalGetInt64
// -------------------------
// Get Value of type SQL_BIGINT
// Since the driver refused to accept the type SQL_BIGINT we read that
// as SQL_C_WCHAR and convert it back to the Int64 data type
//
private object internalGetInt64(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if (GetData(i, ODBC32.SQL_C.WCHAR)) {
                string value = (string)Buffer.MarshalToManaged(0, ODBC32.SQL_C.WCHAR, ODBC32.SQL_NTS);
                this.dataCache[i] = Int64.Parse(value, CultureInfo.InvariantCulture);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public bool GetBoolean(int i) {
    return (bool) internalGetBoolean(i);
}
private object internalGetBoolean(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.BIT)){
                this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.BIT, -1);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public float GetFloat(int i) {
    return (float)internalGetFloat(i);
}
private object internalGetFloat(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.REAL)){
                this.dataCache[i] = Buffer.ReadSingle(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

public DateTime GetDate(int i) {
    return (DateTime)internalGetDate(i);
}
private object internalGetDate(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.TYPE_DATE)){
                this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_DATE, -1);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public DateTime GetDateTime(int i) {
    return (DateTime)internalGetDateTime(i);
}
private object internalGetDateTime(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.TYPE_TIMESTAMP)){
                this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_TIMESTAMP, -1);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public decimal GetDecimal(int i) {
    return (decimal)internalGetDecimal(i);
}
// ---------------------------------------------------------------------------------------------
//
// internal GetDecimal
// -------------------
// Get Value of type SQL_DECIMAL or SQL_NUMERIC
// Due to provider incompatibilities with SQL_DECIMAL or SQL_NUMERIC types we always read the value
// as SQL_C_WCHAR and convert it back to the Decimal data type
//
private object
internalGetDecimal(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.WCHAR )){
                string s = null;
                try {
                    s = (string)Buffer.MarshalToManaged(0, ODBC32.SQL_C.WCHAR, ODBC32.SQL_NTS);
                    this.dataCache[i] = Decimal.Parse(s, System.Globalization.CultureInfo.InvariantCulture);
                }
                catch(OverflowException e) {
                    // overflow: keep the raw string in the cache so a retry surfaces it,
                    // then rethrow to the caller
                    this.dataCache[i] = s;
                    throw e;
                }
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public double GetDouble(int i) {
    return (double)internalGetDouble(i);
}

// Fetches column i as SQL_C_DOUBLE into the row cache.
private object internalGetDouble(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.DOUBLE)){
                this.dataCache[i] = Buffer.ReadDouble(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public Guid GetGuid(int i) {
    return (Guid)internalGetGuid(i);
}

// Fetches column i as SQL_C_GUID into the row cache.
private object internalGetGuid(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.GUID)){
                this.dataCache[i] = Buffer.ReadGuid(0);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

override public String GetString(int i) {
    return (String)internalGetString(i);
}

// Fetches column i as a wide string, chunking through the native buffer when the
// value is larger than the buffer; the complete string is cached for the row.
private object internalGetString(int i) {
    if(_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            // Obtain _ALL_ the characters
            // Note: We can bind directly as WCHAR in ODBC and the DM will convert to and
            // from ANSI if not supported by the driver.
            //
            // Note: The driver always returns the raw length of the data, minus the
            // terminator. This means that our buffer length always includes the terminator
            // charactor, so when determining which characters to count, and if more data
            // exists, it should not take the terminator into effect.
            //
            CNativeBuffer buffer = Buffer;

            // that does not make sense unless we expect four byte terminators
            int cbMaxData = buffer.Length - 4;

            // The first time GetData returns the true length (so we have to min it).
            // We also pass in the true length to the marshal function since there could be
            // embedded nulls
            //
            int lengthOrIndicator;
            if (GetData(i, ODBC32.SQL_C.WCHAR, buffer.Length - 2, out lengthOrIndicator)) {
                // RFC 50002644: we do not expect negative values from GetData call except SQL_NO_TOTAL(== -4)
                // note that in general you should not trust third-party providers so such asserts should be
                // followed by exception. I did not add it now to avoid breaking change
                Debug.Assert(lengthOrIndicator >= 0 || lengthOrIndicator == ODBC32.SQL_NO_TOTAL, "unexpected lengthOrIndicator value");

                if (lengthOrIndicator <= cbMaxData && (ODBC32.SQL_NO_TOTAL != lengthOrIndicator)) {
                    // all data read? good! Directly marshal to a string and we're done
                    //
                    string strdata = buffer.PtrToStringUni(0, Math.Min(lengthOrIndicator, cbMaxData) / 2);
                    this.dataCache[i] = strdata;
                    return strdata;
                }

                // We need to chunk the data
                // Char[] buffer for the junks
                // StringBuilder for the actual string
                //
                Char[] rgChars = new Char[cbMaxData / 2];

                // RFC 50002644: negative value cannot be used for capacity.
                // in case of SQL_NO_TOTAL, set the capacity to cbMaxData, StringBuilder will automatically reallocate
                // its internal buffer when appending more data
                int cbBuilderInitialCapacity = (lengthOrIndicator == ODBC32.SQL_NO_TOTAL) ? cbMaxData : lengthOrIndicator;
                StringBuilder builder = new StringBuilder(cbBuilderInitialCapacity / 2);

                bool gotData;
                int cchJunk;
                int cbActual = cbMaxData;
                // cbMissing tracks bytes still expected; -1 means "unknown" (SQL_NO_TOTAL)
                int cbMissing = (ODBC32.SQL_NO_TOTAL == lengthOrIndicator) ? -1 : lengthOrIndicator - cbActual;
                do {
                    cchJunk = cbActual / 2;
                    buffer.ReadChars(0, rgChars, 0, cchJunk);
                    builder.Append(rgChars, 0, cchJunk);

                    if(0 == cbMissing) {
                        break;          // done
                    }

                    gotData = GetData(i, ODBC32.SQL_C.WCHAR, buffer.Length - 2, out lengthOrIndicator);

                    // RFC 50002644: we do not expect negative values from GetData call except SQL_NO_TOTAL(== -4)
                    // note that in general you should not trust third-party providers so such asserts should be
                    // followed by exception. I did not add it now to avoid breaking change
                    Debug.Assert(lengthOrIndicator >= 0 || lengthOrIndicator == ODBC32.SQL_NO_TOTAL, "unexpected lengthOrIndicator value");

                    if (ODBC32.SQL_NO_TOTAL != lengthOrIndicator) {
                        cbActual = Math.Min(lengthOrIndicator, cbMaxData);
                        if(0 < cbMissing) {
                            cbMissing -= cbActual;
                        }
                        else {
                            // it is a last call to SqlGetData that started with SQL_NO_TOTAL
                            // the last call to SqlGetData must always return the length of the
                            // data, not zero or SqlNoTotal (see Odbc Programmers Reference)
                            Debug.Assert(cbMissing == -1 && lengthOrIndicator <= cbMaxData);
                            cbMissing = 0;
                        }
                    }
                } while(gotData);

                this.dataCache[i] = builder.ToString();
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

// ODBC-specific accessor (not on DbDataReader): SQL time-only column as TimeSpan.
public TimeSpan GetTime(int i) {
    return (TimeSpan)internalGetTime(i);
}

// Fetches column i as SQL_C_TYPE_TIME into the row cache.
private object internalGetTime(int i) {
    if (_isRead) {
        if(this.dataCache.AccessIndex(i) == null) {
            if(GetData(i, ODBC32.SQL_C.TYPE_TIME)){
                this.dataCache[i] = Buffer.MarshalToManaged(0, ODBC32.SQL_C.TYPE_TIME, -1);
            }
        }
        return this.dataCache[i];
    }
    throw ADP.DataReaderNoData();
}

// Tracks the (row, column) a sequential read is positioned on; moving to a new
// position resets the count of bytes already streamed from that column.
private void SetCurrentRowColumnInfo(int row, int column) {
    if (_row != row || _column != column) {
        _row = row;
        _column = column;

        // reset the blob reader when moved to new column
        _sequentialBytesRead = 0;
    }
}

override public long GetBytes(int i, long dataIndex, byte[] buffer, int bufferIndex, int length) {
    return GetBytesOrChars(i, dataIndex, buffer, false /* bytes buffer */, bufferIndex, length);
}

override public long GetChars(int i, long
dataIndex, char[] buffer, int bufferIndex, int length) {
    return GetBytesOrChars(i, dataIndex, buffer, true /* chars buffer */, bufferIndex, length);
}

// unify the implementation of GetChars and GetBytes to prevent code duplicate
private long GetBytesOrChars(int i, long dataIndex, Array buffer, bool isCharsBuffer, int bufferIndex, int length) {
    if (IsClosed) {
        throw ADP.DataReaderNoData();
    }
    if(!_isRead) {
        throw ADP.DataReaderNoData();
    }
    if(dataIndex < 0 ) {
        // test only for negative value here, Int32.MaxValue will be validated only in case of random access
        throw ADP.ArgumentOutOfRange("dataIndex");
    }
    if(bufferIndex < 0) {
        throw ADP.ArgumentOutOfRange("bufferIndex");
    }
    if(length < 0) {
        throw ADP.ArgumentOutOfRange("length");
    }

    // used only in error messages, to name the public entry point
    string originalMethodName = isCharsBuffer ? "GetChars" : "GetBytes";

    // row/column info will be reset only if changed
    SetCurrentRowColumnInfo(_row, i);

    // Possible cases:
    // 1. random access, user asks for the value first time: bring it and cache the value
    // 2. random access, user already queried the value: use the cache
    // 3. sequential access, cache exists: user already read this value using different method (it becomes cached)
    //    use the cache - preserve the original behavior to minimize regression risk
    // 4. sequential access, no cache: (fixed now) user reads the bytes/chars in sequential order (no cache)
    object cachedObj = null;        // The cached object (if there is one)

    // Get cached object, ensure the correct type using explicit cast, to preserve same behavior as before
    if (isCharsBuffer)
        cachedObj = (string)this.dataCache[i];
    else
        cachedObj = (byte[])this.dataCache[i];

    bool isRandomAccess = !IsCommandBehavior(CommandBehavior.SequentialAccess);

    if (isRandomAccess || (cachedObj != null)) {
        // random access (cases 1 or 2) and sequential access with cache (case 3)
        // preserve the original behavior as before the fix
        if (Int32.MaxValue < dataIndex) {
            // indices greater than allocable size are not supported in random access
            // (negative value is already tested in the beginning of ths function)
            throw ADP.ArgumentOutOfRange("dataIndex");
        }

        if(cachedObj == null) {
            // case 1, get the value and cache it
            // internalGetString/internalGetBytes will get the entire value and cache it,
            // since we are not in SequentialAccess (isRandomAccess is true), it is OK
            if (isCharsBuffer) {
                cachedObj = (String)internalGetString(i);
                Debug.Assert((cachedObj != null), "internalGetString should always return non-null or raise exception");
            }
            else {
                cachedObj = (byte[])internalGetBytes(i);
                Debug.Assert((cachedObj != null), "internalGetBytes should always return non-null or raise exception");
            }
            // continue to case 2
        }

        // after this point the value is cached (case 2 or 3)
        // if it is DbNull, cast exception will be raised (same as before the 110664 fix)
        int cachedObjectLength = isCharsBuffer ? ((string)cachedObj).Length : ((byte[])cachedObj).Length;

        // the user can ask for the length of the field by passing in a null pointer for the buffer
        if (buffer == null) {
            // return the length if that's all what user needs
            return cachedObjectLength;
        }

        // user asks for bytes
        if (length == 0) {
            return 0; // Nothing to do ...
        }

        if (dataIndex >= cachedObjectLength) {
            // no more bytes to read
            // see also MDAC bug 73298
            return 0;
        }

        int lengthFromDataIndex = cachedObjectLength - (int)dataIndex;
        int lengthOfCopy = Math.Min(lengthFromDataIndex, length);

        // silently reduce the length to avoid regression from EVERETT
        lengthOfCopy = Math.Min(lengthOfCopy, buffer.Length - bufferIndex);

        if (lengthOfCopy <= 0)
            return 0; // MDAC Bug 73298

        if (isCharsBuffer)
            ((string)cachedObj).CopyTo((int)dataIndex, (char[])buffer, bufferIndex, lengthOfCopy);
        else
            Array.Copy((byte[])cachedObj, (int)dataIndex, (byte[])buffer, bufferIndex, lengthOfCopy);

        return lengthOfCopy;
    }
    else {
        // sequential access, case 4
        // SQLBU:532243 -- For SequentialAccess we need to read a chunk of
        // data and not cache it.
        // Note: If the object was previous cached (see case 3 above), the function will go thru 'if' path, to minimize
        // regressions

        // the user can ask for the length of the field by passing in a null pointer for the buffer
        if (buffer == null) {
            // Get len. of remaining data from provider
            if (isCharsBuffer)
                return GetFieldLength(i, ODBC32.SQL_C.WCHAR);
            else
                return GetFieldLength(i, ODBC32.SQL_C.BINARY);
        }

        // check if user tries to read data that was already received
        // if yes, this violates 'sequential access'
        if ((isCharsBuffer && dataIndex < _sequentialBytesRead / 2) ||
            (!isCharsBuffer && dataIndex < _sequentialBytesRead)) {
            // backward reading is not allowed in sequential access
            throw ADP.NonSeqByteAccess( dataIndex, _sequentialBytesRead, originalMethodName );
        }

        // note that it is actually not possible to read with an offset (dataIndex)
        // therefore, adjust the data index relative to number of bytes already read
        if (isCharsBuffer)
            dataIndex -= _sequentialBytesRead / 2;
        else
            dataIndex -= _sequentialBytesRead;

        if (dataIndex > 0) {
            // user asked to skip bytes - it is OK, even in case of sequential access
            // forward the stream by dataIndex bytes/chars
            int charsOrBytesRead = readBytesOrCharsSequentialAccess(i, null, isCharsBuffer, 0, dataIndex);
            if (charsOrBytesRead < dataIndex) {
                // the stream ended before we forwarded to the requested index, stop now
                return 0;
            }
        }

        // ODBC driver now points to the correct position, start filling the user buffer from now
        // Make sure we don't overflow the user provided buffer
        // Note: SqlDataReader will raise exception if there is no enough room for length requested.
        // In case of ODBC, I decided to keep this consistent with random access after consulting with PM.
length = Math.Min(length, buffer.Length - bufferIndex);
        if (length <= 0)
            return 0; // nothing to read

        // fill the user's buffer
        return readBytesOrCharsSequentialAccess(i, buffer, isCharsBuffer, bufferIndex, length);
    }
}

// fill the user's buffer (char[] or byte[], depending on isCharsBuffer)
// if buffer is null, just skip the bytesOrCharsLength bytes or chars
private int readBytesOrCharsSequentialAccess(int i, Array buffer, bool isCharsBuffer, int bufferIndex, long bytesOrCharsLength) {
    Debug.Assert(bufferIndex >= 0, "Negative buffer index");
    Debug.Assert(bytesOrCharsLength >= 0, "Negative number of bytes or chars to read"); // validated by the caller
    Debug.Assert(buffer == null || bytesOrCharsLength <= (buffer.Length - bufferIndex), "Not enough space in user's buffer");

    int totalBytesOrCharsRead = 0;
    string originalMethodName = isCharsBuffer ? "GetChars" : "GetBytes";

    // we need length in bytes, b/c that is what SQLGetData expects
    long cbLength = (isCharsBuffer)? checked(bytesOrCharsLength * 2) : bytesOrCharsLength;

    // continue reading from the driver until we fill the user's buffer or until no more data is available
    // the data is pumped first into the internal native buffer and after that copied into the user's one if buffer is not null
    CNativeBuffer internalNativeBuffer = this.Buffer;

    // read the data in loop up to th user's length
    // if the data size is less than requested or in case of error, the while loop will stop in the middle
    while (cbLength > 0) {
        // max data to be read, in bytes, not including null-terminator for WCHARs
        int cbReadMax;

        // read from the driver
        bool isNotDbNull;
        int cbTotal;

        // read either bytes or chars, depending on the method called
        if (isCharsBuffer) {
            // for WCHAR buffers, we need to leave space for null-terminator (2 bytes)
            // reserve 2 bytes for null-terminator and 2 bytes to prevent assert in GetData
            // if SQL_NO_TOTAL is returned, this ammount is read from the wire, in bytes
            cbReadMax = (int)Math.Min(cbLength, internalNativeBuffer.Length - 4);
            // SQLGetData will always append it - we do not to copy it to user's buffer
            isNotDbNull = GetData(i, ODBC32.SQL_C.WCHAR, cbReadMax + 2, out cbTotal);
        }
        else {
            // reserve 2 bytes to prevent assert in GetData
            // when querying bytes, no need to reserve space for null
            cbReadMax = (int)Math.Min(cbLength, internalNativeBuffer.Length - 2);
            isNotDbNull = GetData(i, ODBC32.SQL_C.BINARY, cbReadMax, out cbTotal);
        }

        if (!isNotDbNull) {
            // DbNull received, neither GetBytes nor GetChars should be used with DbNull value
            // two options
            // 1. be consistent with SqlDataReader, raise SqlNullValueException
            // 2. be consistent with other Get* methods of OdbcDataReader and raise InvalidCastException
            // after consulting with Himanshu (PM), decided to go with option 2 (raise cast exception)

            // the below line will cause proper InvalidCastException, as expected, since DbNull is not convertible to byte[] or char[]
            object dummy;
            if (isCharsBuffer)
                dummy = (string)this.dataCache[i];
            else
                dummy = (byte[])this.dataCache[i];

            // execution will not go thru this point
        }

        int cbRead; // will hold number of bytes read in this loop
        bool noDataRemained = false;
        if (cbTotal == 0) {
            // no bytes read, stop
            break;
        }
        else if (ODBC32.SQL_NO_TOTAL == cbTotal) {
            // the driver has filled the internal buffer, but the length of remained data is still unknown
            // we will continue looping until SQLGetData indicates the end of data or user buffer is fully filled
            cbRead = cbReadMax;
        }
        else {
            Debug.Assert((cbTotal > 0), "GetData returned negative value, which is not SQL_NO_TOTAL");
            // GetData uses SQLGetData, which StrLen_or_IndPtr (cbTotal in our case) to the current buf + remained buf (if any)
            if (cbTotal > cbReadMax) {
                // in this case the amount of bytes/chars read will be the max requested (and more bytes can be read)
                cbRead = cbReadMax;
            }
            else {
                // SQLGetData read all the available data, no more remained
                // continue processing this chunk and stop
                cbRead = cbTotal;
                noDataRemained = true;
            }
        }

        _sequentialBytesRead += cbRead;

        // update internal state and copy the data to user's buffer
        if (isCharsBuffer) {
            int cchRead = cbRead / 2;
            if (buffer != null) {
                internalNativeBuffer.ReadChars(0, (char[])buffer, bufferIndex, cchRead);
                bufferIndex += cchRead;
            }
            totalBytesOrCharsRead += cchRead;
        }
        else {
            if (buffer != null) {
                internalNativeBuffer.ReadBytes(0, (byte[])buffer, bufferIndex, cbRead);
                bufferIndex += cbRead;
            }
            totalBytesOrCharsRead += cbRead;
        }

        cbLength -= cbRead;

        // stop if no data remained
        if (noDataRemained)
            break;
    }
    return totalBytesOrCharsRead;
}

// Reads the ENTIRE binary value of column i into a managed byte[] (chunking through
// the native buffer when needed) and caches it for the row.
private object internalGetBytes(int i) {
    if(this.dataCache.AccessIndex(i) == null) {
        // Obtain _ALL_ the bytes...
        // The first time GetData returns the true length (so we have to min it).
        Byte[] rgBytes;
        int cbBufferLen = Buffer.Length - 4;
        int cbActual;
        int cbOffset = 0;

        if(GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual)) {
            CNativeBuffer buffer = Buffer;
            if(ODBC32.SQL_NO_TOTAL != cbActual) {
                // total length known up front: allocate once, then chunk into place
                rgBytes = new Byte[cbActual];
                Buffer.ReadBytes(0, rgBytes, cbOffset, Math.Min(cbActual, cbBufferLen));

                // Chunking. The data may be larger than our native buffer. In which case
                // instead of growing the buffer (out of control), we will read in chunks to
                // reduce memory footprint size.
                while(cbActual > cbBufferLen) {
                    // The first time GetData returns the true length. Then successive calls
                    // return the remaining data.
                    bool flag = GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual);
                    Debug.Assert(flag, "internalGetBytes - unexpected invalid result inside if-block");
                    cbOffset += cbBufferLen;
                    buffer.ReadBytes(0, rgBytes, cbOffset, Math.Min(cbActual, cbBufferLen));
                }
            }
            else {
                // SQL_NO_TOTAL: total length unknown - accumulate chunks, then flatten.
                // NOTE(review): the generic type arguments below were lost by extraction;
                // this was almost certainly "List<Byte[]>". Recover before compiling.
                List junkArray = new List ();
                int junkSize;
                int totalSize = 0;
                do {
                    junkSize = (ODBC32.SQL_NO_TOTAL != cbActual) ?
cbActual : cbBufferLen; rgBytes = new Byte[junkSize]; totalSize += junkSize; buffer.ReadBytes(0, rgBytes, 0, junkSize); junkArray.Add(rgBytes); } while ((ODBC32.SQL_NO_TOTAL == cbActual) && GetData(i, ODBC32.SQL_C.BINARY, cbBufferLen, out cbActual)); rgBytes = new Byte[totalSize]; foreach(Byte[] junk in junkArray) { junk.CopyTo(rgBytes, cbOffset); cbOffset += junk.Length; } } // always update the cache this.dataCache[i] = rgBytes; } } return this.dataCache[i]; } // GetColAttribute // --------------- // [IN] iColumn ColumnNumber // [IN] v3FieldId FieldIdentifier of the attribute for version3 drivers (>=3.0) // [IN] v2FieldId FieldIdentifier of the attribute for version2 drivers (<3.0) // // returns the value of the FieldIdentifier field of the column // or -1 if the FieldIdentifier wasn't supported by the driver // private SQLLEN GetColAttribute(int iColumn, ODBC32.SQL_DESC v3FieldId, ODBC32.SQL_COLUMN v2FieldId, ODBC32.HANDLER handler) { Int16 cchNameLength = 0; SQLLEN numericAttribute; ODBC32.RetCode retcode; // protect against dead connection, dead or canceling command. 
if ((Connection == null) || _cmdWrapper.Canceling) {
        return -1;
    }

    //Ordinals are 1:base in odbc
    OdbcStatementHandle stmt = StatementHandle;
    if (Connection.IsV3Driver) {
        retcode = stmt.ColumnAttribute(iColumn+1, (short)v3FieldId, Buffer, out cchNameLength, out numericAttribute);
    }
    else if (v2FieldId != (ODBC32.SQL_COLUMN)(-1)) {
        // pre-3.0 driver: fall back to the SQLColAttributes-era field id
        retcode = stmt.ColumnAttribute(iColumn+1, (short)v2FieldId, Buffer, out cchNameLength, out numericAttribute);
    }
    else {
        // no v2 equivalent exists for this attribute
        return 0;
    }
    if (retcode != ODBC32.RetCode.SUCCESS) {
        if (retcode == ODBC32.RetCode.ERROR) {
            // HY091 = invalid descriptor field identifier: remember the driver
            // does not support this attribute so we stop asking for it
            if ("HY091" == Command.GetDiagSqlState()) {
                Connection.FlagUnsupportedColAttr(v3FieldId, v2FieldId);
            }
        }
        if(handler == ODBC32.HANDLER.THROW) {
            Connection.HandleError(stmt, retcode);
        }
        return -1;
    }
    return numericAttribute;
}

// GetColAttributeStr
// ---------------
// [IN] iColumn   ColumnNumber
// [IN] v3FieldId FieldIdentifier of the attribute for version3 drivers (>=3.0)
// [IN] v2FieldId FieldIdentifier of the attribute for version2 drivers (<3.0)
//
// returns the stringvalue of the FieldIdentifier field of the column
// or null if the string returned was empty or if the FieldIdentifier wasn't supported by the driver
//
private String GetColAttributeStr(int i, ODBC32.SQL_DESC v3FieldId, ODBC32.SQL_COLUMN v2FieldId, ODBC32.HANDLER handler) {
    ODBC32.RetCode retcode;
    Int16 cchNameLength = 0;
    SQLLEN numericAttribute;

    CNativeBuffer buffer = Buffer;
    // pre-clear so an empty result reads back as a zero-length string
    buffer.WriteInt16(0, 0);

    OdbcStatementHandle stmt = StatementHandle;

    // protect against dead connection
    if (Connection == null || _cmdWrapper.Canceling || stmt == null) {
        return "";
    }

    if (Connection.IsV3Driver) {
        retcode = stmt.ColumnAttribute(i+1, (short)v3FieldId, buffer, out cchNameLength, out numericAttribute);
    }
    else if (v2FieldId != (ODBC32.SQL_COLUMN)(-1)) {
        retcode = stmt.ColumnAttribute(i+1, (short)v2FieldId, buffer, out cchNameLength, out numericAttribute);
    }
    else {
        return null;
    }
    if((retcode != ODBC32.RetCode.SUCCESS) || (cchNameLength == 0)) {
        if (retcode == ODBC32.RetCode.ERROR) {
            // HY091 = invalid descriptor field identifier: remember the driver
            // does not support this attribute so we stop asking for it
            if ("HY091" == Command.GetDiagSqlState()) {
                Connection.FlagUnsupportedColAttr(v3FieldId, v2FieldId);
            }
        }
        if(handler == ODBC32.HANDLER.THROW) {
            Connection.HandleError(stmt, retcode);
        }
        return null;
    }
    // cchNameLength is in bytes; PtrToStringUni wants a character count
    string retval = buffer.PtrToStringUni(0, cchNameLength/2 /*cch*/);
    return retval;
}

// todo: Another 3.0 only attribute that is guaranteed to fail on V2 driver.
// need to special case this for V2 drivers.
//
// Reads a string descriptor field via SQLGetDescField on the APP_PARAM_DESC
// descriptor; V3 drivers only.
private String GetDescFieldStr(int i, ODBC32.SQL_DESC attribute, ODBC32.HANDLER handler) {
    Int32 numericAttribute = 0;

    // protect against dead connection, dead or canceling command.
    if ((Connection == null) || _cmdWrapper.Canceling) {
        return "";
    }

    // APP_PARAM_DESC is a (ODBCVER >= 0x0300) attribute
    if (!Connection.IsV3Driver) {
        Debug.Assert (false, "Non-V3 driver. Must not call GetDescFieldStr");
        return null;
    }

    ODBC32.RetCode retcode;
    CNativeBuffer buffer = Buffer;

    // Need to set the APP_PARAM_DESC values here
    using(OdbcDescriptorHandle hdesc = new OdbcDescriptorHandle(StatementHandle, ODBC32.SQL_ATTR.APP_PARAM_DESC)) {
        //SQLGetDescField
        retcode = hdesc.GetDescriptionField(i+1, attribute, buffer, out numericAttribute);

        //Since there are many attributes (column, statement, etc), that may or may not be
        //supported, we don't want to throw (which obtains all errorinfo, marshals strings,
        //builds exceptions, etc), in common cases, unless we absolutly need this info...
        if((retcode != ODBC32.RetCode.SUCCESS) || (numericAttribute == 0)) {
            if (retcode == ODBC32.RetCode.ERROR) {
                if ("HY091" == Command.GetDiagSqlState()) {
                    Connection.FlagUnsupportedColAttr(attribute, (ODBC32.SQL_COLUMN)0);
                }
            }
            if(handler == ODBC32.HANDLER.THROW) {
                Connection.HandleError(StatementHandle, retcode);
            }
            return null;
        }
    }
    // numericAttribute holds the byte length of the returned string
    string retval = buffer.PtrToStringUni(0, numericAttribute/2 /*cch*/);
    return retval;
}

// GetFieldLength
// This will return the correct number of items in the field.
//
// Queries the remaining length of column i (in chars for WCHAR, bytes for BINARY)
// without consuming data: SQLGetData with a zero/terminator-only buffer returns the
// remaining length in the indicator.
private int GetFieldLength(int i, ODBC32.SQL_C sqlctype) {
    Debug.Assert(((sqlctype == ODBC32.SQL_C.BINARY)||(sqlctype == ODBC32.SQL_C.WCHAR)), "Illegal Type for GetFieldLength");

    if (IsCancelingCommand)
        throw ADP.DataReaderNoData();
    Debug.Assert (null != StatementHandle, "Statement handle is null in DateReader");

    IntPtr cbActual = IntPtr.Zero;
    // WCHAR needs room for the 2-byte null terminator; BINARY needs none
    int cb = 0;
    if (sqlctype== ODBC32.SQL_C.WCHAR) {
        cb = 2;
    }

    //SQLGetData
    Debug.Assert((Buffer.Length>=cb),"Unexpected size of _buffer");
    CNativeBuffer buffer = Buffer;
    ODBC32.RetCode retcode = StatementHandle.GetData(
        (i+1),  //Ordinals are 1:base in odbc
        sqlctype,
        buffer,
        cb,
        out cbActual);

    // NO_DATA is a valid answer here (nothing left to read), so only real failures throw
    if(retcode != ODBC32.RetCode.SUCCESS && retcode != ODBC32.RetCode.SUCCESS_WITH_INFO && retcode != ODBC32.RetCode.NO_DATA)
        Connection.HandleError(StatementHandle, retcode);

    if (sqlctype== ODBC32.SQL_C.WCHAR) {
        // the indicator is in bytes; convert to a character count
        return (int)cbActual/2;
    }
    return (int)cbActual;
}

private bool GetData(int i, ODBC32.SQL_C sqlctype) {
    // Never call GetData with anything larger than _buffer.Length-2.
    // We keep reallocating native buffers and it kills performance!!!
    int dummy;
    return GetData(i, sqlctype, Buffer.Length - 4, out dummy);
}

/// <summary>
/// retrieves the data into this.Buffer.
/// * If the data is DbNull, the value be also cached and false is returned.
/// * if the data is not DbNull, the value is not cached and true is returned
/// </summary>
/// <returns>false if value is DbNull, true otherwise</returns>
private bool GetData(int i, ODBC32.SQL_C sqlctype, int cb, out int cbActualOut) {
    IntPtr cbActual = IntPtr.Zero;  // Length or an indicator value

    if (IsCancelingCommand){
        throw ADP.DataReaderNoData();
    }
    Debug.Assert (null != StatementHandle, "Statement handle is null in DateReader");

    // Never call GetData with anything larger than _buffer.Length-2.
    // We keep reallocating native buffers and it kills performance!!!
    Debug.Assert(cb <= Buffer.Length-2, "GetData needs to Reallocate. Perf bug");

    // SQLGetData
    CNativeBuffer buffer = Buffer;
    ODBC32.RetCode retcode = StatementHandle.GetData(
        (i+1),  // Column ordinals start at 1 in odbc
        sqlctype,
        buffer,
        cb,
        out cbActual);

    switch (retcode) {
    case ODBC32.RetCode.SUCCESS:
        break;
    case ODBC32.RetCode.SUCCESS_WITH_INFO:
        if ((Int32)cbActual == ODBC32.SQL_NO_TOTAL) {
            // more data remains and the driver cannot tell how much - this is expected
            break;
        }
        // devnote: don't we want to fire an event?
        break;
    default:
        Connection.HandleError(StatementHandle, retcode);
        break;
    }

    // reset the current row and column
    SetCurrentRowColumnInfo(_row, i);

    // SQL_NULL_DATA
    if(cbActual == (IntPtr)ODBC32.SQL_NULL_DATA) {
        // cache the null so later accessors see it without another driver call
        this.dataCache[i] = DBNull.Value;
        cbActualOut = 0;
        return false;
    }

    //Return the actual size (for chunking scenarios)
    cbActualOut = (int)cbActual;
    return true;
}

override public bool Read() {
    if (IsClosed) {
        throw ADP.DataReaderClosed("Read");
    }

    if (IsCancelingCommand) {
        _isRead = false;
        return false;
    }

    // HasRows needs to call into Read so we don't want to read on the actual Read call
    if (_skipReadOnce){
        _skipReadOnce = false;
        return _isRead;
    }

    if (_noMoreRows || _noMoreResults || IsCommandBehavior(CommandBehavior.SchemaOnly))
        return false;

    if (!_isValidResult) {
        return false;
    }

    ODBC32.RetCode retcode;

    //SQLFetch is only valid to call for row returning queries
    //We get: [24000]Invalid cursor state.  So we could either check the count
    //ahead of time (which is cached), or check on error and compare error states.
//Note: SQLFetch is also invalid to be called on a prepared (schemaonly) statement //SqlFetch retcode = StatementHandle.Fetch(); switch(retcode) { case ODBC32.RetCode.SUCCESS_WITH_INFO: Connection.HandleErrorNoThrow(StatementHandle, retcode); _hasRows = HasRowsStatus.HasRows; _isRead = true; break; case ODBC32.RetCode.SUCCESS: _hasRows = HasRowsStatus.HasRows; _isRead = true; break; case ODBC32.RetCode.NO_DATA: _isRead = false; if (_hasRows == HasRowsStatus.DontKnow) { _hasRows = HasRowsStatus.HasNoRows; } break; default: Connection.HandleError(StatementHandle, retcode); break; } //Null out previous cached row values. this.dataCache.FlushValues(); // if CommandBehavior == SingleRow we set _noMoreResults to true so that following reads will fail if (IsCommandBehavior(CommandBehavior.SingleRow)) { _noMoreRows = true; // no more rows, set to -1 SetCurrentRowColumnInfo(-1, 0); } else { // move to the next row SetCurrentRowColumnInfo(_row + 1, 0); } return _isRead; } // Called by odbccommand when executed for the first time internal void FirstResult() { Int16 cCols; SQLLEN cRowsAffected; cRowsAffected = GetRowCount(); // get rowcount of the current resultset (if any) CalculateRecordsAffected(cRowsAffected); // update recordsaffected ODBC32.RetCode retcode = FieldCountNoThrow(out cCols); if ((retcode == ODBC32.RetCode.SUCCESS) && (cCols == 0)) { NextResult(); } else { this._isValidResult = true; } } override public bool NextResult() { return NextResult(false, false); } private bool NextResult(bool disposing, bool allresults) { // if disposing, loop through all the remaining results and ignore error messages // if allresults, loop through all results and collect all error messages for a single exception // callers are via Close(false, true), Dispose(true, false), NextResult(false,false) Debug.Assert(!disposing || !allresults, "both disposing & allresults are true"); const int MaxConsecutiveFailure = 2000; // see WebData 72126 for why more than 1000 SQLLEN cRowsAffected; 
    Int16 cColsAffected;
    ODBC32.RetCode retcode, firstRetCode = ODBC32.RetCode.SUCCESS;
    bool hasMoreResults;
    bool hasColumns = false;
    bool singleResult = IsCommandBehavior(CommandBehavior.SingleResult);

    if (IsClosed) {
        throw ADP.DataReaderClosed("NextResult");
    }
    _fieldNameLookup = null;

    if (IsCancelingCommand || _noMoreResults) {
        return false;
    }

    //Blow away the previous cache (since the next result set will have a different shape,
    //different schema data, and different data.
    _isRead = false;
    _hasRows = HasRowsStatus.DontKnow;
    _fieldNameLookup = null;
    this.metadata = null;
    this.schemaTable = null;

    int loop = 0;   // infinite loop protection, max out after 2000 consecutive failed results
    OdbcErrorCollection errors = null; // SQLBU 342112

    do {
        _isValidResult = false;
        retcode = StatementHandle.MoreResults();
        hasMoreResults = ((retcode == ODBC32.RetCode.SUCCESS)
                        ||(retcode == ODBC32.RetCode.SUCCESS_WITH_INFO));

        if (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO) {
            Connection.HandleErrorNoThrow(StatementHandle, retcode);
        }
        else if (!disposing && (retcode != ODBC32.RetCode.NO_DATA) && (ODBC32.RetCode.SUCCESS != retcode)) {
            // allow for building cumulative error messages.
if (null == errors) { firstRetCode = retcode; errors = new OdbcErrorCollection(); } ODBC32.GetDiagErrors(errors, null, StatementHandle, retcode); ++loop; } if (!disposing && hasMoreResults) { loop = 0; cRowsAffected = GetRowCount(); // get rowcount of the current resultset (if any) CalculateRecordsAffected(cRowsAffected); // update recordsaffected if (!singleResult) { // update row- and columncount FieldCountNoThrow(out cColsAffected); hasColumns = (0 != cColsAffected); _isValidResult = hasColumns; } } } while ((!singleResult && hasMoreResults && !hasColumns) // repeat for results with no columns || ((ODBC32.RetCode.NO_DATA != retcode) && allresults && (loop < MaxConsecutiveFailure)) // or process all results until done || (singleResult && hasMoreResults)); // or for any result in singelResult mode if (MaxConsecutiveFailure <= loop) { Bid.Trace("2000 consecutive failed results"); } if(retcode == ODBC32.RetCode.NO_DATA) { this.dataCache = null; _noMoreResults = true; } if (null != errors) { Debug.Assert(!disposing, "errors while disposing"); errors.SetSource(Connection.Driver); OdbcException exception = OdbcException.CreateException(errors, firstRetCode); Connection.ConnectionIsAlive(exception); throw exception; } return (hasMoreResults); } private void BuildMetaDataInfo() { int count = FieldCount; MetaData[] metaInfos = new MetaData[count]; List qrytables; bool needkeyinfo = IsCommandBehavior(CommandBehavior.KeyInfo); bool isKeyColumn; bool isHidden; ODBC32.SQL_NULLABILITY nullable; if (needkeyinfo) qrytables = new List (); else qrytables = null; // Find out all the metadata info, not all of this info will be available in all cases // for(int i=0; i 0) ) { List .Enumerator tablesEnum = qrytables.GetEnumerator(); QualifiedTableName qualifiedTableName = new QualifiedTableName(Connection.QuoteChar(ADP.GetSchemaTable)); while(tablesEnum.MoveNext()) { // Find the primary keys, identity and autincrement columns qualifiedTableName.Table = tablesEnum.Current; if 
(RetrieveKeyInfo(needkeyinfo, qualifiedTableName, false) <= 0) { RetrieveKeyInfo(needkeyinfo, qualifiedTableName, true); } } } else { // Some drivers ( < 3.x ?) do not provide base table information. In this case try to // find it by parsing the statement QualifiedTableName qualifiedTableName = new QualifiedTableName(Connection.QuoteChar(ADP.GetSchemaTable), GetTableNameFromCommandText()); if (!ADP.IsEmpty(qualifiedTableName.Table)) { // fxcop SetBaseTableNames(qualifiedTableName); if (RetrieveKeyInfo(needkeyinfo, qualifiedTableName, false) <= 0) { RetrieveKeyInfo(needkeyinfo, qualifiedTableName, true); } } } } } private DataTable NewSchemaTable() { DataTable schematable = new DataTable("SchemaTable"); schematable.Locale = CultureInfo.InvariantCulture; schematable.MinimumCapacity = this.FieldCount; //Schema Columns DataColumnCollection columns = schematable.Columns; columns.Add(new DataColumn("ColumnName", typeof(System.String))); columns.Add(new DataColumn("ColumnOrdinal", typeof(System.Int32))); // UInt32 columns.Add(new DataColumn("ColumnSize", typeof(System.Int32))); // UInt32 columns.Add(new DataColumn("NumericPrecision", typeof(System.Int16))); // UInt16 columns.Add(new DataColumn("NumericScale", typeof(System.Int16))); columns.Add(new DataColumn("DataType", typeof(System.Object))); columns.Add(new DataColumn("ProviderType", typeof(System.Int32))); columns.Add(new DataColumn("IsLong", typeof(System.Boolean))); columns.Add(new DataColumn("AllowDBNull", typeof(System.Boolean))); columns.Add(new DataColumn("IsReadOnly", typeof(System.Boolean))); columns.Add(new DataColumn("IsRowVersion", typeof(System.Boolean))); columns.Add(new DataColumn("IsUnique", typeof(System.Boolean))); columns.Add(new DataColumn("IsKey", typeof(System.Boolean))); columns.Add(new DataColumn("IsAutoIncrement", typeof(System.Boolean))); columns.Add(new DataColumn("BaseSchemaName", typeof(System.String))); columns.Add(new DataColumn("BaseCatalogName", typeof(System.String))); columns.Add(new 
DataColumn("BaseTableName", typeof(System.String))); columns.Add(new DataColumn("BaseColumnName", typeof(System.String))); // MDAC Bug 79231 foreach (DataColumn column in columns) { column.ReadOnly = true; } return schematable; } // The default values are already defined in DbSchemaRows (see DbSchemaRows.cs) so there is no need to set any default value // override public DataTable GetSchemaTable() { if (IsClosed) { // MDAC 68331 throw ADP.DataReaderClosed("GetSchemaTable"); // can't use closed connection } if (_noMoreResults) { return null; // no more results } if (null != this.schemaTable) { return this.schemaTable; // return cached schematable } //Delegate, to have the base class setup the structure DataTable schematable = NewSchemaTable(); if (FieldCount == 0) { return schematable; } if (this.metadata == null) { BuildMetaDataInfo(); } DataColumn columnName = schematable.Columns["ColumnName"]; DataColumn columnOrdinal = schematable.Columns["ColumnOrdinal"]; DataColumn columnSize = schematable.Columns["ColumnSize"]; DataColumn numericPrecision = schematable.Columns["NumericPrecision"]; DataColumn numericScale = schematable.Columns["NumericScale"]; DataColumn dataType = schematable.Columns["DataType"]; DataColumn providerType = schematable.Columns["ProviderType"]; DataColumn isLong = schematable.Columns["IsLong"]; DataColumn allowDBNull = schematable.Columns["AllowDBNull"]; DataColumn isReadOnly = schematable.Columns["IsReadOnly"]; DataColumn isRowVersion = schematable.Columns["IsRowVersion"]; DataColumn isUnique = schematable.Columns["IsUnique"]; DataColumn isKey = schematable.Columns["IsKey"]; DataColumn isAutoIncrement = schematable.Columns["IsAutoIncrement"]; DataColumn baseSchemaName = schematable.Columns["BaseSchemaName"]; DataColumn baseCatalogName = schematable.Columns["BaseCatalogName"]; DataColumn baseTableName = schematable.Columns["BaseTableName"]; DataColumn baseColumnName = schematable.Columns["BaseColumnName"]; //Populate the rows (1 row for each 
column) int count = FieldCount; for(int i=0; i = 264, "Native buffer to small (_buffer.Length < 264)"); RuntimeHelpers.PrepareConstrainedRegions(); try { buffer.DangerousAddRef(ref mustRelease); if (needkeyinfo) { if (!Connection.ProviderInfo.NoSqlPrimaryKeys) { // Get the primary keys retcode = KeyInfoStatementHandle.PrimaryKeys( qualifiedTableName.Catalog, qualifiedTableName.Schema, qualifiedTableName.GetTable(quoted)); if ((retcode == ODBC32.RetCode.SUCCESS) || (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO)) { bool noUniqueKey = false; // We are only interested in column name buffer.WriteInt16(0, 0); retcode = KeyInfoStatementHandle.BindColumn2( (short)(ODBC32.SQL_PRIMARYKEYS.COLUMNNAME), // Column Number ODBC32.SQL_C.WCHAR, buffer.PtrOffset(0, 256), (IntPtr)256, buffer.PtrOffset(256, IntPtr.Size).Handle); while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) { cbActual = buffer.ReadIntPtr(256); columnname = buffer.PtrToStringUni(0, (int)cbActual/2/*cch*/); ordinal = this.GetOrdinalFromBaseColName(columnname); if (ordinal != -1) { keyColumns ++; this.metadata[ordinal].isKeyColumn = true; this.metadata[ordinal].isUnique = true; this.metadata[ordinal].isNullable = false; this.metadata[ordinal].baseTableName = qualifiedTableName.Table; if (this.metadata[ordinal].baseColumnName == null) { this.metadata[ordinal].baseColumnName = columnname; } } else { noUniqueKey = true; break; // no need to go over the remaining columns anymore } } // // if we got keyinfo from the column we dont even get to here! 
// // reset isUnique flag if the key(s) are not unique // if (noUniqueKey) { foreach (MetaData metadata in this.metadata) { metadata.isKeyColumn = false; } } // Unbind the column retcode = KeyInfoStatementHandle.BindColumn3( (short)(ODBC32.SQL_PRIMARYKEYS.COLUMNNAME), // SQLUSMALLINT ColumnNumber ODBC32.SQL_C.WCHAR, // SQLSMALLINT TargetType buffer.DangerousGetHandle()); // SQLLEN * StrLen_or_Ind } else { if ("IM001" == Command.GetDiagSqlState()) { Connection.ProviderInfo.NoSqlPrimaryKeys = true; } } } if (keyColumns == 0) { // SQLPrimaryKeys did not work. Have to use the slower SQLStatistics to obtain key information KeyInfoStatementHandle.MoreResults(); keyColumns += RetrieveKeyInfoFromStatistics(qualifiedTableName, quoted); } KeyInfoStatementHandle.MoreResults(); } // Get the special columns for version retcode = KeyInfoStatementHandle.SpecialColumns(qualifiedTableName.GetTable(quoted)); if ((retcode == ODBC32.RetCode.SUCCESS) || (retcode == ODBC32.RetCode.SUCCESS_WITH_INFO)) { // We are only interested in column name cbActual = IntPtr.Zero; buffer.WriteInt16(0, 0); retcode = KeyInfoStatementHandle.BindColumn2( (short)(ODBC32.SQL_SPECIALCOLUMNSET.COLUMN_NAME), ODBC32.SQL_C.WCHAR, buffer.PtrOffset(0, 256), (IntPtr)256, buffer.PtrOffset(256, IntPtr.Size).Handle); while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) { cbActual = buffer.ReadIntPtr(256); columnname = buffer.PtrToStringUni(0, (int)cbActual/2/*cch*/); ordinal = this.GetOrdinalFromBaseColName(columnname); if (ordinal != -1) { this.metadata[ordinal].isRowVersion = true; if (this.metadata[ordinal].baseColumnName == null) { this.metadata[ordinal].baseColumnName = columnname; } } } // Unbind the column retcode = KeyInfoStatementHandle.BindColumn3( (short)(ODBC32.SQL_SPECIALCOLUMNSET.COLUMN_NAME), ODBC32.SQL_C.WCHAR, buffer.DangerousGetHandle()); retcode = KeyInfoStatementHandle.MoreResults(); } else { // i've seen "DIAG [HY000] [Microsoft][ODBC SQL Server Driver]Connection is busy 
with results for another hstmt (0) " // how did we get here? SqlServer does not allow a second handle (Keyinfostmt) anyway... // /* string msg = "Unexpected failure of SQLSpecialColumns. Code = " + Command.GetDiagSqlState(); Debug.Assert (false, msg); */ } } finally { if (mustRelease) { buffer.DangerousRelease(); } } return keyColumns; } // Uses SQLStatistics to retrieve key information for a table private int RetrieveKeyInfoFromStatistics(QualifiedTableName qualifiedTableName, bool quoted) { ODBC32.RetCode retcode; String columnname = String.Empty; String indexname = String.Empty; String currentindexname = String.Empty; int[] indexcolumnordinals = new int[16]; int[] pkcolumnordinals = new int[16]; int npkcols = 0; int ncols = 0; // No of cols in the index bool partialcolumnset = false; int ordinal; int indexordinal; IntPtr cbIndexLen = IntPtr.Zero; IntPtr cbColnameLen = IntPtr.Zero; int keyColumns = 0; // devnote: this test is already done by calling method ... // if (IsClosed) return; // protect against dead connection // MDAC Bug 75928 - SQLStatisticsW damages the string passed in // To protect the tablename we need to pass in a copy of that string String tablename1 = String.Copy(qualifiedTableName.GetTable(quoted)); // Select only unique indexes retcode = KeyInfoStatementHandle.Statistics(tablename1); if (retcode != ODBC32.RetCode.SUCCESS) { // We give up at this point return 0; } CNativeBuffer buffer = Buffer; bool mustRelease = false; Debug.Assert(buffer.Length >= 544, "Native buffer to small (_buffer.Length < 544)"); RuntimeHelpers.PrepareConstrainedRegions(); try { buffer.DangerousAddRef(ref mustRelease); const int colnameBufOffset = 0; const int indexBufOffset = 256; const int ordinalBufOffset = 512; const int colnameActualOffset = 520; const int indexActualOffset = 528; const int ordinalActualOffset = 536; HandleRef colnamebuf = buffer.PtrOffset(colnameBufOffset, 256); HandleRef indexbuf = buffer.PtrOffset(indexBufOffset, 256); HandleRef ordinalbuf = 
buffer.PtrOffset(ordinalBufOffset, 4); IntPtr colnameActual = buffer.PtrOffset(colnameActualOffset, IntPtr.Size).Handle; IntPtr indexActual = buffer.PtrOffset(indexActualOffset, IntPtr.Size).Handle; IntPtr ordinalActual = buffer.PtrOffset(ordinalActualOffset, IntPtr.Size).Handle; //We are interested in index name, column name, and ordinal buffer.WriteInt16(indexBufOffset, 0); retcode = KeyInfoStatementHandle.BindColumn2( (short)(ODBC32.SQL_STATISTICS.INDEXNAME), ODBC32.SQL_C.WCHAR, indexbuf, (IntPtr)256, indexActual); retcode = KeyInfoStatementHandle.BindColumn2( (short)(ODBC32.SQL_STATISTICS.ORDINAL_POSITION), ODBC32.SQL_C.SSHORT, ordinalbuf, (IntPtr)4, ordinalActual); buffer.WriteInt16(ordinalBufOffset, 0); retcode = KeyInfoStatementHandle.BindColumn2( (short)(ODBC32.SQL_STATISTICS.COLUMN_NAME), ODBC32.SQL_C.WCHAR, colnamebuf, (IntPtr)256, colnameActual); // Find the best unique index on the table, use the ones whose columns are // completely covered by the query. while (ODBC32.RetCode.SUCCESS == (retcode = KeyInfoStatementHandle.Fetch())) { cbColnameLen = buffer.ReadIntPtr(colnameActualOffset); cbIndexLen = buffer.ReadIntPtr(indexActualOffset); // If indexname is not returned, skip this row if (0 == buffer.ReadInt16(indexBufOffset)) continue; // Not an index row, get next row. 
columnname = buffer.PtrToStringUni(colnameBufOffset, (int)cbColnameLen/2/*cch*/); indexname = buffer.PtrToStringUni(indexBufOffset, (int)cbIndexLen/2/*cch*/); ordinal = (int) buffer.ReadInt16(ordinalBufOffset); if (SameIndexColumn(currentindexname, indexname, ordinal, ncols)) { // We are still working on the same index if (partialcolumnset) continue; // We don't have all the keys for this index, so we can't use it ordinal = this.GetOrdinalFromBaseColName(columnname, qualifiedTableName.Table); if (ordinal == -1) { partialcolumnset = true; } else { // Add the column to the index column set if (ncols < 16) indexcolumnordinals[ncols++] = ordinal; else // Can't deal with indexes > 16 columns partialcolumnset = true; } } else { // We got a new index, save the previous index information if (!partialcolumnset && (ncols != 0)) { // Choose the unique index with least columns as primary key if ((npkcols == 0) || (npkcols > ncols)){ npkcols = ncols; for (int i = 0 ; i < ncols ; i++) pkcolumnordinals[i] = indexcolumnordinals[i]; } } // Reset the parameters for a new index ncols = 0; currentindexname = indexname; partialcolumnset = false; // Add this column to index ordinal = this.GetOrdinalFromBaseColName(columnname, qualifiedTableName.Table); if (ordinal == -1) { partialcolumnset = true; } else { // Add the column to the index column set indexcolumnordinals[ncols++] = ordinal; } } // Go on to the next column } // Do we have an index? if (!partialcolumnset && (ncols != 0)) { // Choose the unique index with least columns as primary key if ((npkcols == 0) || (npkcols > ncols)){ npkcols = ncols; for (int i = 0 ; i < ncols ; i++) pkcolumnordinals[i] = indexcolumnordinals[i]; } } // Mark the chosen index as primary key if (npkcols != 0) { for (int i = 0 ; i < npkcols ; i++) { indexordinal = pkcolumnordinals[i]; keyColumns++; this.metadata[indexordinal].isKeyColumn = true; // should we set isNullable = false? 
                // This makes the QuikTest against Jet fail
                //
                // test test test - we don't know if this is nulalble or not so why do we want to set it to a value?
                this.metadata[indexordinal].isNullable = false;
                this.metadata[indexordinal].isUnique = true;
                if (this.metadata[indexordinal].baseTableName == null) {
                    this.metadata[indexordinal].baseTableName = qualifiedTableName.Table;
                }
                if (this.metadata[indexordinal].baseColumnName == null) {
                    this.metadata[indexordinal].baseColumnName = columnname;
                }
            }
        }

        // Unbind the columns
        _cmdWrapper.FreeKeyInfoStatementHandle(ODBC32.STMT.UNBIND);
    }
    finally {
        if (mustRelease) {
            buffer.DangerousRelease();
        }
    }
    return keyColumns;
}

// Returns true when the statistics row currently fetched continues the index
// we are already collecting: same index name and the ordinal that immediately
// follows the columns gathered so far (ordinals are 1-based, ncols counts
// columns collected).
internal bool SameIndexColumn(String currentindexname, String indexname, int ordinal, int ncols) {
    if (ADP.IsEmpty(currentindexname)){
        return false;
    }
    if ((currentindexname == indexname) && (ordinal == ncols+1))
        return true;
    return false;
}

// Maps a base column name to its zero-based ordinal; see the overload below.
internal int GetOrdinalFromBaseColName(String columnname) {
    return GetOrdinalFromBaseColName(columnname, null);
}

// Looks up the zero-based ordinal of a column by its base column name,
// optionally constrained to a base table name.  Falls back to a lookup by
// result-set column name (IndexOf) when no base-name match is found; returns
// -1 for an empty column name.
internal int GetOrdinalFromBaseColName(String columnname, String tablename) {
    if (ADP.IsEmpty(columnname)) {
        return -1;
    }
    if (this.metadata != null) {
        int count = FieldCount;
        for (int i = 0 ; i < count ; i++) {
            if ( (this.metadata[i].baseColumnName != null) &&
                (columnname == this.metadata[i].baseColumnName)) {
                if (!ADP.IsEmpty(tablename)) {
                    if (tablename == this.metadata[i].baseTableName) {
                        return i;
                    } // end if
                } // end if
                else {
                    return i;
                } // end else
            }
        }
    }
    // We can't find it in base column names, try regular colnames
    return this.IndexOf(columnname);
}

// We try parsing the SQL statement to get the table name as a last resort when
// the driver doesn't return this information back to us.
//
// we can't handle multiple tablenames (JOIN)
// only the first tablename will be returned
// Best-effort parse of the command text for the (single) table name of a
// SELECT/INSERT/UPDATE/DELETE statement; returns null when the statement
// cannot be parsed or references multiple tables.
internal string GetTableNameFromCommandText() {
    if (command == null){
        return null;
    }
    String localcmdtext = _cmdText;
    if (ADP.IsEmpty(localcmdtext)) { // fxcop
        return null;
    }
    String tablename;
    int idx;
    CStringTokenizer tokenstmt = new CStringTokenizer(localcmdtext, Connection.QuoteChar(ADP.GetSchemaTable)[0], Connection.EscapeChar(ADP.GetSchemaTable));

    if (tokenstmt.StartsWith("select") == true) {
        // select command, search for from clause
        idx = tokenstmt.FindTokenIndex("from");
    }
    else {
        if ((tokenstmt.StartsWith("insert") == true) ||
            (tokenstmt.StartsWith("update") == true) ||
            (tokenstmt.StartsWith("delete") == true) ) {
            // Get the following word
            idx = tokenstmt.CurrentPosition;
        }
        else
            idx = -1;
    }
    if (idx == -1)
        return null;

    // The next token is the table name
    tablename = tokenstmt.NextToken();

    localcmdtext = tokenstmt.NextToken();
    if ((localcmdtext.Length > 0) && (localcmdtext[0] == ',')) {
        return null;    // can't handle multiple tables
    }
    if ((localcmdtext.Length == 2) &&
        ((localcmdtext[0] == 'a') || (localcmdtext[0] == 'A')) &&
        ((localcmdtext[1] == 's') || (localcmdtext[1] == 'S'))) {
        // aliased table, skip the alias name
        localcmdtext = tokenstmt.NextToken();
        localcmdtext = tokenstmt.NextToken();
        if ((localcmdtext.Length > 0) && (localcmdtext[0] == ',')) {
            return null;    // Multiple tables
        }
    }
    return tablename;
}

// NOTE(review): from here to the end of the UnQuote body below, the scraped
// text is damaged -- the body of SetBaseTableNames, the QualifiedTableName
// class declaration and its members appear to have been dropped by the text
// extraction (angle-bracketed spans were stripped).  The surviving tokens are
// preserved exactly as found; recover the missing code from the original
// reference source before compiling.
internal void SetBaseTableNames(QualifiedTableName qualifiedTableName) {
    int count = FieldCount;
    for(int i=0; i 0)) {
        char quoteChar = _quoteChar[0];
        if (str[0] == quoteChar) {
            Debug.Assert (str.Length > 1, "Illegal string, only one char that is a quote");
            Debug.Assert (str[str.Length-1] == quoteChar, "Missing quote at end of string that begins with quote");
            if (str.Length > 1 && str[str.Length-1] == quoteChar) {
                // strip the surrounding quote characters
                str = str.Substring(1, str.Length-2);
            }
        }
    }
    return str;
}
}

// Per-column metadata gathered for the current result set and used to build
// the schema table (see GetSchemaTable / BuildMetaDataInfo).
sealed private class MetaData {
    internal int ordinal;           // zero-based column ordinal
    internal TypeMap typemap;       // ODBC/CLR type mapping for the column
    internal SQLLEN size;
    internal byte precision;
    internal byte scale;

    internal bool isAutoIncrement;
    internal bool isUnique;
    internal bool isReadOnly;
    internal bool isNullable;
    internal bool isRowVersion;
    internal bool isLong;
    internal bool isKeyColumn;      // part of the chosen primary/unique key

    internal string baseSchemaName;
    internal string baseCatalogName;
    internal string baseTableName;
    internal string baseColumnName;
}
}
}

// File provided for Reference Use Only by Microsoft Corporation (c) 2007.
Link Menu

This book is available now!
Buy at Amazon US or
Buy at Amazon UK
- AutomationElement.cs
- InkCanvasSelectionAdorner.cs
- Rectangle.cs
- cryptoapiTransform.cs
- TrackingDataItem.cs
- DataRecordInfo.cs
- TimeSpanValidator.cs
- RelatedImageListAttribute.cs
- Serializer.cs
- XsltOutput.cs
- DefaultEventAttribute.cs
- RowToFieldTransformer.cs
- Effect.cs
- DataBindingHandlerAttribute.cs
- unitconverter.cs
- SelectionItemProviderWrapper.cs
- SiteMapNodeItemEventArgs.cs
- SolidBrush.cs
- PackagingUtilities.cs
- LinqDataSourceInsertEventArgs.cs
- SystemDropShadowChrome.cs
- StringFormat.cs
- StateMachine.cs
- SafeSecurityHandles.cs
- StringSource.cs
- TextParagraphProperties.cs
- ControlCodeDomSerializer.cs
- TypeConverterValueSerializer.cs
- OracleMonthSpan.cs
- RadioButton.cs
- TransformPattern.cs
- PackagePart.cs
- InfoCardCryptoHelper.cs
- SafeNativeMethodsMilCoreApi.cs
- DiscriminatorMap.cs
- DBSqlParserColumn.cs
- NavigateEvent.cs
- FormattedTextSymbols.cs
- QueryCoreOp.cs
- SqlInternalConnection.cs
- WebPartTransformer.cs
- HtmlHistory.cs
- InteropTrackingRecord.cs
- ToolboxItemFilterAttribute.cs
- DocumentXmlWriter.cs
- SymLanguageType.cs
- PixelFormatConverter.cs
- CriticalHandle.cs
- FloaterParagraph.cs
- OletxTransactionManager.cs
- RuleRefElement.cs
- DictionaryEntry.cs
- DbParameterCollectionHelper.cs
- Set.cs
- HttpCookieCollection.cs
- ControlType.cs
- ProxyManager.cs
- StickyNote.cs
- WebPartCancelEventArgs.cs
- Win32.cs
- VerificationException.cs
- DateTimeOffset.cs
- DataSourceUtil.cs
- HMACRIPEMD160.cs
- Activator.cs
- TextContainerChangedEventArgs.cs
- BulletedListEventArgs.cs
- SqlConnectionHelper.cs
- HttpConfigurationSystem.cs
- ResourceReferenceExpression.cs
- WinEventHandler.cs
- CommonProperties.cs
- ConditionalBranch.cs
- AttachmentCollection.cs
- SQLBinary.cs
- FilterElement.cs
- CompoundFileReference.cs
- CharacterBufferReference.cs
- DataSourceConverter.cs
- TreeNodeSelectionProcessor.cs
- WebScriptEnablingBehavior.cs
- TextHidden.cs
- CurrentChangingEventManager.cs
- DesignerForm.cs
- DocumentScope.cs
- translator.cs
- SimpleWebHandlerParser.cs
- baseaxisquery.cs
- _KerberosClient.cs
- TextTreeUndo.cs
- ObjectItemAttributeAssemblyLoader.cs
- ToolstripProfessionalRenderer.cs
- WindowsTooltip.cs
- ProfileManager.cs
- TreeViewDesigner.cs
- RuntimeHandles.cs
- DragEventArgs.cs
- RotateTransform.cs
- MessagePartDescription.cs
- RegexCaptureCollection.cs