Code:
.NET 4.0 / DEVDIV_TFS / Dev10 / Releases / RTMRel / ndp / fx / src / Core / System / Linq / Parallel / Merging / DefaultMergeHelper.cs (changeset 1305376)
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// DefaultMergeHelper.cs
//
// [....]
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// The default merge helper uses a set of straightforward algorithms for output
    /// merging. Namely, for synchronous merges, the input data is yielded from the
    /// input data streams in "depth first" left-to-right order. For asynchronous merges,
    /// on the other hand, we use a biased choice algorithm to favor input channels in
    /// a "fair" way. No order preservation is carried out by this helper.
    /// </summary>
    /// <typeparam name="TInputOutput"></typeparam>
    /// <typeparam name="TIgnoreKey"></typeparam>
    internal class DefaultMergeHelper<TInputOutput, TIgnoreKey> : IMergeHelper<TInputOutput>
    {
        private QueryTaskGroupState m_taskGroupState;                      // State shared among tasks.
        private PartitionedStream<TInputOutput, TIgnoreKey> m_partitions;  // Source partitions.
        private AsynchronousChannel<TInputOutput>[] m_asyncChannels;       // Destination channels (async).
        private SynchronousChannel<TInputOutput>[] m_syncChannels;         // Destination channels (sync).
        private IEnumerator<TInputOutput> m_channelEnumerator;             // Output enumerator.
        private TaskScheduler m_taskScheduler;                             // The task manager to execute the query.
        private bool m_ignoreOutput;                                       // Whether we're enumerating "for effect".

        //------------------------------------------------------------------------------------
        // Instantiates a new merge helper.
        //
        // Arguments:
        //     partitions   - the source partitions from which to consume data.
        //     ignoreOutput - whether we're enumerating "for effect" or for output.
        //     pipeline     - whether to use a pipelined merge.
        //

        internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                                    TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
        {
            Contract.Assert(partitions != null);

            m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
            m_partitions = partitions;
            m_taskScheduler = taskScheduler;
            m_ignoreOutput = ignoreOutput;

            TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

            // If output won't be ignored, we need to manufacture a set of channels for the consumer.
            // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
            if (!ignoreOutput)
            {
                // Create the asynchronous or synchronous channels, based on whether we're pipelining.
                if (options != ParallelMergeOptions.FullyBuffered)
                {
                    if (partitions.PartitionCount > 1)
                    {
                        m_asyncChannels =
                            MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, cancellationState.MergedCancellationToken);
                        m_channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(m_taskGroupState, m_asyncChannels);
                    }
                    else
                    {
                        // If there is only one partition, we don't need to create channels. The only producer enumerator
                        // will be used as the result enumerator.
                        m_channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], m_taskGroupState.CancellationState).GetEnumerator();
                    }
                }
                else
                {
                    m_syncChannels =
                        MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
                    m_channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(m_taskGroupState, m_syncChannels);
                }

                Contract.Assert(m_asyncChannels == null || m_asyncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_syncChannels == null || m_syncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
            }
        }

        //-----------------------------------------------------------------------------------
        // Schedules execution of the merge itself.
        //
        // Arguments:
        //    ordinalIndexState - the state of the ordinal index of the merged partitions
        //

        void IMergeHelper<TInputOutput>.Execute()
        {
            if (m_asyncChannels != null)
            {
                SpoolingTask.SpoolPipeline<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_asyncChannels, m_taskScheduler);
            }
            else if (m_syncChannels != null)
            {
                SpoolingTask.SpoolStopAndGo<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_syncChannels, m_taskScheduler);
            }
            else if (m_ignoreOutput)
            {
                SpoolingTask.SpoolForAll<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_taskScheduler);
            }
            else
            {
                // The last case is a pipelining merge when DOP = 1. In this case, the consumer thread itself will compute the results,
                // so we don't need any tasks to compute the results asynchronously.
                Contract.Assert(m_partitions.PartitionCount == 1);
            }
        }

        //-----------------------------------------------------------------------------------
        // Gets the enumerator from which to enumerate output results.
        //

        IEnumerator<TInputOutput> IMergeHelper<TInputOutput>.GetEnumerator()
        {
            Contract.Assert(m_ignoreOutput || m_channelEnumerator != null);
            return m_channelEnumerator;
        }

        //-----------------------------------------------------------------------------------
        // Returns the results as an array.
        //

        public TInputOutput[] GetResultsAsArray()
        {
            if (m_syncChannels != null)
            {
                // Right size an array.
                int totalSize = 0;
                for (int i = 0; i < m_syncChannels.Length; i++)
                {
                    totalSize += m_syncChannels[i].Count;
                }
                TInputOutput[] array = new TInputOutput[totalSize];

                // And then blit the elements in.
                int current = 0;
                for (int i = 0; i < m_syncChannels.Length; i++)
                {
                    m_syncChannels[i].CopyTo(array, current);
                    current += m_syncChannels[i].Count;
                }

                return array;
            }
            else
            {
                List<TInputOutput> output = new List<TInputOutput>();
                using (IEnumerator<TInputOutput> enumerator = ((IMergeHelper<TInputOutput>)this).GetEnumerator())
                {
                    while (enumerator.MoveNext())
                    {
                        output.Add(enumerator.Current);
                    }
                }

                return output.ToArray();
            }
        }
    }
}

// File provided for Reference Use Only by Microsoft Corporation (c) 2007.
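For orientation, the pipelined (asynchronous channel) and fully buffered (synchronous channel) paths that this helper wires up correspond to the ParallelMergeOptions a PLINQ query requests via WithMergeOptions. The sketch below is not part of this file; it is a minimal illustrative consumer, and the exact spooling path the runtime chooses also depends on the query shape and degree of parallelism.

    using System;
    using System.Linq;

    class MergeOptionsExample
    {
        static void Main()
        {
            int[] source = { 3, 1, 4, 1, 5, 9, 2, 6 };

            // NotBuffered (and AutoBuffered) request a pipelined merge: producer tasks
            // write into asynchronous channels while the consumer enumerates
            // (the SpoolPipeline branch in the helper above, when DOP > 1).
            var pipelined = source.AsParallel()
                                  .WithMergeOptions(ParallelMergeOptions.NotBuffered)
                                  .Select(x => x * x);

            foreach (int value in pipelined)
            {
                Console.WriteLine(value); // elements arrive as partitions produce them
            }

            // FullyBuffered runs every partition to completion into synchronous channels
            // before the consumer sees any element (the SpoolStopAndGo branch above).
            int[] buffered = source.AsParallel()
                                   .WithMergeOptions(ParallelMergeOptions.FullyBuffered)
                                   .Select(x => x * x)
                                   .ToArray();

            Console.WriteLine(string.Join(", ", buffered));
        }
    }

With NotBuffered or AutoBuffered the consumer can start receiving elements while producers are still running; with FullyBuffered the merge finishes before the first element is handed out, and GetResultsAsArray can then concatenate the synchronous channels directly into a right-sized array.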