Example #1
HRESULT CDelay::InternalAllocateStreamingResources()
{
	AtlTrace("InternalAllocateStreamingResources()\n");
    
	_ASSERTE(InputType(0)->formattype == FORMAT_WaveFormatEx);
    _ASSERTE(m_dwDelay > 0);

    m_pWave = (WAVEFORMATEX*)InputType(0)->pbFormat;

    // Allocate the buffer that holds the delayed samples   
	m_cbDelayBuffer = (m_dwDelay * m_pWave->nSamplesPerSec * m_pWave->nBlockAlign) / 1000;
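	// Worked example (illustrative numbers only, not from the original sample):
	// a 100 ms delay on 16-bit stereo PCM at 44100 Hz (nBlockAlign = 4) needs
	// (100 * 44100 * 4) / 1000 = 17640 bytes of delay buffer.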
	m_pbDelayBuffer = (BYTE*)CoTaskMemAlloc(m_cbDelayBuffer);
	
	if (m_pbDelayBuffer == NULL)
    {
		return E_OUTOFMEMORY;
    }
	
	FillBufferWithSilence();	
	
	m_pbDelayPtr = m_pbDelayBuffer;

	AtlTrace("\tAllocated %d byte buffer.\n", m_cbDelayBuffer);
	DumpWaveformat(m_pWave);	

    return S_OK;
}
Example #2
HRESULT CDelay::InternalGetOutputType(DWORD dwOutputStreamIndex, DWORD dwTypeIndex,
                                           DMO_MEDIA_TYPE *pmt)
{
    if (dwTypeIndex != 0)
    {
        return DMO_E_NO_MORE_ITEMS;
    }

    // if pmt is NULL, we just return S_OK if the type index is in range
    if (pmt == NULL)
    {
        return S_OK;
    }

    if (InputTypeSet(0))   // If the input type is set, we prefer that one
    {
        return MoCopyMediaType(pmt, InputType(0));
    }
    else
    {
        // Input type is not set; propose something we like.
        return GetPcmType(pmt);
    }
}
///////////////////////////////////
//
//  IMediaObjectImpl::InternalGetOutputType
//
//  *** Called by GetOutputType, description below ***
//
//  The GetOutputType method retrieves a preferred media type for a specified
//  output stream.
//
//  Parameters
//
//      dwOutputStreamIndex
//          Zero-based index of an output stream on the DMO.
//
//      dwTypeIndex
//          Zero-based index on the set of acceptable media types.
//
//      pmt
//          [out] Pointer to a DMO_MEDIA_TYPE structure allocated by the
//          caller. The method fills the structure with the media type. The
//          format block might be NULL, in which case the format type GUID is GUID_NULL.
//
//  Return Value
//      S_OK Success
//      DMO_E_INVALIDSTREAMINDEX Invalid stream index
//      DMO_E_NO_MORE_ITEMS Type index is out of range
//      E_OUTOFMEMORY Insufficient memory
//      E_POINTER NULL pointer argument
//
//  Call this method to enumerate an output stream's preferred media types. The
//  DMO assigns each media type an index value, in order of preference. The
//  most preferred type has an index of zero. To enumerate all the types, make
//  successive calls while incrementing the type index, until the method returns
//  DMO_E_NO_MORE_ITEMS.
//
//  If the method succeeds, call MoFreeMediaType to free the format block.
//
//  To set the media type, call the SetOutputType method. Setting the media type
//  on one stream can change another stream's preferred types. In fact, a stream
//  might not have a preferred type until the type is set on another stream. For
//  example, a decoder might not have a preferred output type until the input
//  type is set. However, the DMO is not required to update its preferred types
//  dynamically in this fashion. Thus, the types returned by this method are not
//  guaranteed to be valid; they might fail when used in the SetOutputType method.
//  Conversely, the DMO is not guaranteed to enumerate every media type that it
//  supports. To test whether a particular media type is acceptable, call
//  SetOutputType with the DMO_SET_TYPEF_TEST_ONLY flag.
//
//
HRESULT CHXAudioDeviceHookBase::InternalGetOutputType(DWORD dwOutputStreamIndex, DWORD dwTypeIndex, DMO_MEDIA_TYPE *pmt)
{
    // This function resembles InternalGetInputType() since the input and output types must
    // be consistent for DirectSound

    HRESULT hr = S_OK;

    if (dwTypeIndex > 0)
    {
        return DMO_E_NO_MORE_ITEMS;
    }

    // If pmt is NULL, and the type index is in range, we return S_OK
    if (pmt == NULL)
    {
        return S_OK;
    }

    // If the input type is set, we prefer to use that one
    if (InputTypeSet(0))
    {
        return MoCopyMediaType(pmt, InputType(0));
    }

    hr = MoInitMediaType(pmt, sizeof(WAVEFORMATEX));

    if (SUCCEEDED(hr))
    {
        pmt->majortype  = MEDIATYPE_Audio;
        pmt->subtype    = MEDIASUBTYPE_PCM;         // We take PCM format!
        pmt->formattype = FORMAT_None;
    }

    return hr;
}
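
A minimal caller-side sketch of the enumeration procedure described in the comment block above (not part of the original sources); pDMO is an assumed IMediaObject pointer and EnumeratePreferredOutputTypes is a hypothetical helper name.

HRESULT EnumeratePreferredOutputTypes(IMediaObject *pDMO)
{
    DMO_MEDIA_TYPE mt;
    HRESULT hr = S_OK;

    for (DWORD dwTypeIndex = 0; SUCCEEDED(hr); ++dwTypeIndex)
    {
        hr = pDMO->GetOutputType(0, dwTypeIndex, &mt);

        if (hr == DMO_E_NO_MORE_ITEMS)
        {
            return S_OK;            // every preferred type has been enumerated
        }

        if (SUCCEEDED(hr))
        {
            // ... inspect mt here ...
            MoFreeMediaType(&mt);   // free the format block, as noted above
        }
    }

    return hr;                      // some other failure from GetOutputType
}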
Example #4
HRESULT CDelay::InternalGetInputSizeInfo(DWORD dwInputStreamIndex, DWORD *pcbSize,
                                              DWORD *pcbMaxLookahead, DWORD *pcbAlignment)
{

    // IMediaObjectImpl validates this for us... 
    _ASSERTE(InputTypeSet(dwInputStreamIndex));

    // And we expect only PCM audio types.
    _ASSERTE(InputType(dwInputStreamIndex)->formattype == FORMAT_WaveFormatEx);
    
    WAVEFORMATEX *pWave = (WAVEFORMATEX*)InputType(dwInputStreamIndex)->pbFormat;
    
    *pcbSize = pWave->nBlockAlign;
    *pcbMaxLookahead = 0;
    *pcbAlignment = 1;
    
    return S_OK; 
}
////////////////////////////////////////////
//
//  IMediaObjectImpl::InternalCheckOutputType
//
//  Queries whether an output stream can accept a given media type. The derived
//  class must declare and implement this method.
//
//  Parameters
//
//      dwOutputStreamIndex
//          Index of an output stream.
//
//      pmt
//          Pointer to a DMO_MEDIA_TYPE structure that describes the media type.
//
//  Return Value
//
//      Returns S_OK if the media type is valid, or DMO_E_INVALIDTYPE otherwise.
//
//  Note:
//
//  Called by IMediaObject::SetOutputType
//
HRESULT CHXAudioDeviceHookBase::InternalCheckOutputType(DWORD dwOutputStreamIndex,const DMO_MEDIA_TYPE *pmt)
{
    // Check that this is a PCM audio type we can accept
    HRESULT hr = S_OK;

    if ((NULL                       == pmt) ||
        (MEDIATYPE_Audio            != pmt->majortype) ||
        (MEDIASUBTYPE_PCM           != pmt->subtype) ||
        (FORMAT_WaveFormatEx        != pmt->formattype &&
         FORMAT_None                != pmt->formattype) ||
        (pmt->cbFormat              <  sizeof(WAVEFORMATEX)) ||
        (NULL                       == pmt->pbFormat))
    {
        hr = DMO_E_INVALIDTYPE;
    }

    // If other type set, accept only if identical to that.  Otherwise accept
    // any standard PCM/float audio.
    if (SUCCEEDED(hr))
    {
        if (InputTypeSet(0))
        {
            const DMO_MEDIA_TYPE* pmtInput;
            pmtInput = InputType(0);
            if (memcmp(pmt->pbFormat, pmtInput->pbFormat, sizeof(WAVEFORMATEX)))
            {
                hr = DMO_E_INVALIDTYPE;
            }
        }
        else
        {
            WAVEFORMATEX* pWave = (WAVEFORMATEX*)pmt->pbFormat;
            if ((WAVE_FORMAT_PCM != pWave->wFormatTag) ||
                ((8 != pWave->wBitsPerSample) && (16 != pWave->wBitsPerSample)) ||
                ((1 != pWave->nChannels) && (2 != pWave->nChannels)) ||
                ( // Supported sample rates:
                 (96000 != pWave->nSamplesPerSec) &&
                 (48000 != pWave->nSamplesPerSec) &&
                 (44100 != pWave->nSamplesPerSec) &&
                 (32000 != pWave->nSamplesPerSec) &&
                 (22050 != pWave->nSamplesPerSec) &&
                 (16000 != pWave->nSamplesPerSec) &&
                 (11025 != pWave->nSamplesPerSec) &&
                 (8000 != pWave->nSamplesPerSec) &&
                 TRUE   // You may delete && TRUE
                ) ||
                (pWave->nBlockAlign != pWave->nChannels * pWave->wBitsPerSample / 8) ||
                (pWave->nAvgBytesPerSec != pWave->nSamplesPerSec * pWave->nBlockAlign))
            {
                hr = DMO_E_INVALIDTYPE;
            }
        }
    }

    return hr;
}
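
A minimal sketch of probing this DMO with a concrete type (not part of the original sources); pDMO is an assumed IMediaObject pointer, and the illustrative values follow the consistency rules enforced above (nBlockAlign = nChannels * wBitsPerSample / 8, nAvgBytesPerSec = nSamplesPerSec * nBlockAlign).

WAVEFORMATEX wfx;
ZeroMemory(&wfx, sizeof(wfx));
wfx.wFormatTag      = WAVE_FORMAT_PCM;
wfx.nChannels       = 2;
wfx.wBitsPerSample  = 16;
wfx.nSamplesPerSec  = 44100;
wfx.nBlockAlign     = wfx.nChannels * wfx.wBitsPerSample / 8;   // 4
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;     // 176400

DMO_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(mt));
mt.majortype  = MEDIATYPE_Audio;
mt.subtype    = MEDIASUBTYPE_PCM;
mt.formattype = FORMAT_WaveFormatEx;
mt.cbFormat   = sizeof(WAVEFORMATEX);
mt.pbFormat   = (BYTE*)&wfx;

// Ask whether the type would be accepted without actually setting it.
HRESULT hr = pDMO->SetOutputType(0, &mt, DMO_SET_TYPEF_TEST_ONLY);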
////////////////////////////////////
//
//  IMediaObjectImpl::InternalGetInputSizeInfo
//
//  *** Called by GetInputSizeInfo, description below ***
//
//  The GetInputSizeInfo method retrieves the buffer requirements for a
//  specified input stream.
//
//  Parameters
//
//  dwInputStreamIndex:     Zero-based index of an input stream on the DMO.
//
//  pcbSize:                [out] Pointer to a variable that receives
//      the minimum size of an input buffer for this stream, in bytes.
//
//  pulSizeMaxLookahead:        [out] Pointer to a variable that receives the
//      maximum amount of data that the DMO will hold for lookahead, in bytes.
//      If the DMO does not perform lookahead on the stream, the value is zero.
//
//  pulSizeAlignment            [out] Pointer to a variable that receives the
//      required buffer alignment, in bytes. If the input stream has no
//      alignment requirement, the value is 1.
//
//  Return Value
//      S_OK Success
//      DMO_E_INVALIDSTREAMINDEX Invalid stream index
//      DMO_E_TYPE_NOT_SET Media type was not set
//
//  The buffer requirements may depend on the media types of the various
//  streams. Before calling this method, set the media type of each stream
//  by calling the SetInputType and SetOutputType methods. If the media types
//  have not been set, this method might return an error.
//
//  If the DMO performs lookahead on the input stream, it returns the
//  DMO_INPUT_STREAMF_HOLDS_BUFFERS flag in the GetInputStreamInfo method.
//  During processing, the DMO holds up to the number of bytes indicated by the
//  pulSizeMaxLookahead parameter. The application must allocate enough buffers for
//  the DMO to hold this much data.
//
//  A buffer is aligned if the buffer's start address is a multiple of
//  *pulSizeAlignment. The alignment must be a power of two. Depending on the
//  microprocessor, reads and writes to an aligned buffer might be faster than
//  to an unaligned buffer. Also, some microprocessors do not support unaligned
//  reads and writes.
//
//  Note:
//
//  GetInputSizeInfo returns DMO_E_TYPE_NOT_SET unless all of the non-optional
//  streams have media types. Therefore, in the derived class, the internal
//  methods can assume that all of the non-optional streams have media types.
//
HRESULT CHXAudioDeviceHookBase::InternalGetInputSizeInfo(DWORD dwInputStreamIndex, DWORD *pcbSize, DWORD *pulSizeMaxLookahead, DWORD *pulSizeAlignment)
{
    // We don't have to do any validation, because it is all done in the base class

    HRESULT hr = S_OK;
    const DMO_MEDIA_TYPE* pmt;
    pmt = InputType(0);
    const WAVEFORMATEX* pwfx = reinterpret_cast<const WAVEFORMATEX*>(pmt->pbFormat);
    *pcbSize = pwfx->nChannels * pwfx->wBitsPerSample / 8;
    *pulSizeMaxLookahead = 0;   // no look ahead
    *pulSizeAlignment = 1;      // no alignment requirement

    return hr;
}
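
A minimal caller-side sketch of using the buffer requirements returned above (not part of the original sources); pDMO is an assumed IMediaObject pointer.

DWORD cbMinSize = 0, cbMaxLookahead = 0, cbAlignment = 0;
HRESULT hr = pDMO->GetInputSizeInfo(0, &cbMinSize, &cbMaxLookahead, &cbAlignment);
if (SUCCEEDED(hr))
{
    // Every input buffer must be at least cbMinSize bytes (one PCM block for this
    // hook), start at an address that is a multiple of cbAlignment, and the caller
    // must budget cbMaxLookahead extra bytes if the DMO holds data for lookahead.
}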
Example #7
STDMETHODIMP CDelay::Clone(IMediaObjectInPlace **ppMediaObject)
{
    HRESULT hr;


    if (!ppMediaObject)
    {
        return E_POINTER;
    }

    *ppMediaObject = NULL;

    // Make a new one
    CDelay *pTemp = new CComObject<CDelay>;

    if (!pTemp)
    {
        return E_OUTOFMEMORY;
    }
    
    // Set the media types
    CComQIPtr<IMediaObject, &IID_IMediaObject> pMediaObj(pTemp);
    _ASSERTE(pMediaObj != NULL);
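
    // Note: pMediaObj (a CComQIPtr) now holds the only reference to the new object,
    // so if either Set*Type call below fails and we return early, the smart pointer's
    // Release destroys pTemp for us; no explicit cleanup is needed.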

    if (InputTypeSet(0))
    {
        hr = pMediaObj->SetInputType(0, InputType(0), 0);
        if (FAILED(hr))
        {
            return hr;
        }
    }

    if (OutputTypeSet(0))
    {
        hr = pMediaObj->SetOutputType(0, OutputType(0), 0);
        if (FAILED(hr))
        {
            return hr;
        }
    }

    // Everything is OK, return the AddRef'd pointer.
    return pTemp->QueryInterface(IID_IMediaObjectInPlace, (void**)ppMediaObject);
}
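
A minimal caller-side sketch of the Clone call above (not part of the original sample); pDelayDMO is an assumed, already-created IMediaObjectInPlace pointer for this DMO.

IMediaObjectInPlace *pClone = NULL;
HRESULT hr = pDelayDMO->Clone(&pClone);
if (SUCCEEDED(hr))
{
    // The clone comes back with the same input/output media types already set.
    // ... use pClone independently of the original ...
    pClone->Release();
}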
Example #8
HRESULT CDMODecoder::InternalAllocateStreamingResources()
{
	LOGPRINTF("%p CDMODecoder::InternalAllocateStreamingResources()", this);

	const DMO_MEDIA_TYPE *pmtIn  = InputType(0);
	const DMO_MEDIA_TYPE *pmtOut = OutputType(0);
	const VIDEOINFOHEADER *pvihIn  = (const VIDEOINFOHEADER *)pmtIn->pbFormat;
	const VIDEOINFOHEADER *pvihOut = (const VIDEOINFOHEADER *)pmtOut->pbFormat;
	utvf_t outfmt;

	if (DirectShowFormatToUtVideoFormat(&outfmt, pvihOut->bmiHeader.biCompression, pvihOut->bmiHeader.biBitCount, pmtOut->subtype) != 0)
		return DMO_E_INVALIDTYPE;

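	// The bytes following the BITMAPINFOHEADER in the input format (biSize minus the
	// header size) are handed to DecodeBegin as the codec-specific extra data.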
	if (m_pCodec->DecodeBegin(outfmt, pvihOut->bmiHeader.biWidth, pvihOut->bmiHeader.biHeight, CBGROSSWIDTH_WINDOWS,
			((BYTE *)&pvihIn->bmiHeader) + sizeof(BITMAPINFOHEADER), pvihIn->bmiHeader.biSize - sizeof(BITMAPINFOHEADER)) == 0)
		return S_OK;
	else
		return E_FAIL;
}
void Tokenizer<TokenEnumType, InputType>::readToken(InputIterator start, InputIterator end, TokenType &outputToken) const
{
    outputToken.type = TokenEnumType::Invalid;

    auto accepted = start;
    auto lastAccepted = start;

    auto currentState = m_rootState;

    auto iterator = start;

    for (; iterator != end; ++iterator)
    {
        auto transition = currentState->m_edges.find( *iterator );
        auto transitionFound = transition != currentState->m_edges.end( );

        auto nextState = transitionFound ?
            transition->second : currentState->m_defaultEdge;

        // terminating state
        if (!nextState)
        {
            accepted = (lastAccepted == start) ? iterator : lastAccepted;

            break;
        }

        if (nextState->m_acceptingType != TokenEnumType::Invalid)
        {
            outputToken.type = nextState->m_acceptingType;

            lastAccepted = iterator + 1;
        }

        currentState = nextState;
    }

    // we read until the end of input
    if (iterator == end)
        accepted = end;

    outputToken.value = InputType( start, accepted );
}
Example #10
HRESULT CDelay::InternalCheckOutputType(DWORD dwOutputStreamIndex, const DMO_MEDIA_TYPE *pmt)
{
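    // CDelay appears to use a single input and a single output stream, which is
    // why dwOutputStreamIndex can double as the input stream index below.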
    // If our output type is already set, reject format changes
    if (OutputTypeSet(dwOutputStreamIndex) && !TypesMatch(pmt, OutputType(dwOutputStreamIndex)))
    {
        return DMO_E_INVALIDTYPE;
    }
    
    // If our input type is already set, the output type must match
    else if (InputTypeSet(dwOutputStreamIndex) && !TypesMatch(pmt, InputType(dwOutputStreamIndex)))
    {
        return DMO_E_INVALIDTYPE;
    }
    
    // If no types are set yet, validate the format 
    else 
    {
        return CheckPcmFormat(pmt);
    }
    
}
void concurrency_levels( size_t concurrency, Body body ) {
    typedef typename std::tuple_element<0,OutputTuple>::type OutputType;
    for ( size_t lc = 1; lc <= concurrency; ++lc ) { 
        tbb::flow::graph g;
        harness_graph_multifunction_executor<InputType, OutputTuple, tbb::spin_mutex>::execute_count = 0;

        tbb::flow::multifunction_node< InputType, OutputTuple, tbb::flow::rejecting > exe_node( g, lc, body );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers];

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( tbb::flow::output_port<0>(exe_node), receivers[r] );
            }

            harness_counting_sender<InputType> *senders = NULL;
    
            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                {
                    // lock m to prevent exe_node from finishing
                    tbb::spin_mutex::scoped_lock l( harness_graph_multifunction_executor< InputType, OutputTuple, tbb::spin_mutex >::mutex );
    
                    // put to lc level, it will accept and then block at m
                    for ( size_t c = 0 ; c < lc ; ++c ) {
                        ASSERT( exe_node.try_put( InputType() ) == true, NULL );
                    }
                    // it only accepts to lc level
                    ASSERT( exe_node.try_put( InputType() ) == false, NULL );
    
                    senders = new harness_counting_sender<InputType>[num_senders];
                    for (size_t s = 0; s < num_senders; ++s ) {
                       // register a sender
                       senders[s].my_limit = N;
                       exe_node.register_predecessor( senders[s] );
                    }
    
                } // release lock at end of scope, setting the exe node free to continue
                // wait for graph to settle down
                g.wait_for_all();
    
                // confirm that each sender was requested from N times 
                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s].my_received;
                    ASSERT( n == N, NULL ); 
                    ASSERT( senders[s].my_receiver == &exe_node, NULL );
                }
                // confirm that each receiver got N * num_senders + the initial lc puts
                for (size_t r = 0; r < num_receivers; ++r ) {
                    size_t n = receivers[r].my_count;
                    ASSERT( n == num_senders*N+lc, NULL );
                    receivers[r].my_count = 0;
                }
                delete [] senders;
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_node), receivers[r] );
            }
            ASSERT( exe_node.try_put( InputType() ) == true, NULL );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                ASSERT( int(receivers[r].my_count) == 0, NULL );
            }
            delete [] receivers;
        }
    }
}
 void operator()( int ) const  {
     for ( int i = 0; i < N; ++i ) {
         // the nodes will accept all puts
         ASSERT( my_exe_node->try_put( InputType() ) == true, NULL );
     }
 }
Example #13
void buffered_levels( size_t concurrency, Body body ) {

   // Do for lc = 1 to concurrency level
   for ( size_t lc = 1; lc <= concurrency; ++lc ) {
   tbb::flow::graph g;

   // Set the execute_counter back to zero in the harness
   harness_graph_executor<InputType, OutputType>::execute_count = 0;
   // Set the number of current executors to zero.
   harness_graph_executor<InputType, OutputType>::current_executors = 0;
   // Set the max allowed executors to lc.  There is a check in the functor to make sure this is never exceeded.
   harness_graph_executor<InputType, OutputType>::max_executors = lc;

   // Create the function_node with the appropriate concurrency level, and use default buffering
   tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, body );
   tbb::flow::function_node<InputType, InputType> pass_thru( g, tbb::flow::unlimited, pass_through<InputType>());

   // Create a vector of identical exe_nodes and pass_thrus
   std::vector< tbb::flow::function_node< InputType, OutputType > > exe_vec(2, exe_node);
   std::vector< tbb::flow::function_node< InputType, InputType > > pass_thru_vec(2, pass_thru);
   // Attach each pass_thru to its corresponding exe_node
   for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) {
       tbb::flow::make_edge(pass_thru_vec[node_idx], exe_vec[node_idx]);
   }

   // TODO: why is the test executed serially for the node pairs rather than concurrently?
   for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) {
   // For num_receivers = 1 to MAX_NODES
   for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
        // Create num_receivers counting receivers and connect the exe_vec[node_idx] to them.
        std::vector< harness_mapped_receiver<OutputType>* > receivers(num_receivers);
        for (size_t i = 0; i < num_receivers; i++) {
            receivers[i] = new harness_mapped_receiver<OutputType>(g);
        }

        for (size_t r = 0; r < num_receivers; ++r ) {
            tbb::flow::make_edge( exe_vec[node_idx], *receivers[r] );
        }

        // Do the test with varying numbers of senders
        harness_counting_sender<InputType> *senders = NULL;
        for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
            // Create num_senders senders, set their message limit each to N, and connect them to pass_thru_vec[node_idx]
            senders = new harness_counting_sender<InputType>[num_senders];
            for (size_t s = 0; s < num_senders; ++s ) {
               senders[s].my_limit = N;
               senders[s].register_successor(pass_thru_vec[node_idx] );
            }

            // Initialize the receivers so they know how many senders and messages to check for
            for (size_t r = 0; r < num_receivers; ++r ) {
                 receivers[r]->initialize_map( N, num_senders );
            }

            // Do the test
            NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
            g.wait_for_all();

            // confirm that each sender was requested from N times
            for (size_t s = 0; s < num_senders; ++s ) {
                size_t n = senders[s].my_received;
                ASSERT( n == N, NULL );
                ASSERT( senders[s].my_receiver == &pass_thru_vec[node_idx], NULL );
            }
            // validate the receivers
            for (size_t r = 0; r < num_receivers; ++r ) {
                receivers[r]->validate();
            }
            delete [] senders;
        }
        for (size_t r = 0; r < num_receivers; ++r ) {
            tbb::flow::remove_edge( exe_vec[node_idx], *receivers[r] );
        }
        ASSERT( exe_vec[node_idx].try_put( InputType() ) == true, NULL );
        g.wait_for_all();
        for (size_t r = 0; r < num_receivers; ++r ) {
            // since it's detached, nothing should have changed
            receivers[r]->validate();
        }

        for (size_t i = 0; i < num_receivers; i++) {
            delete receivers[i];
        }

    } // for num_receivers
    } // for node_idx
    } // for concurrency level lc
}
Example #14
void concurrency_levels( size_t concurrency, Body body ) {

   for ( size_t lc = 1; lc <= concurrency; ++lc ) {
       tbb::flow::graph g;

       // Set the execute_counter back to zero in the harness
       harness_graph_executor<InputType, OutputType>::execute_count = 0;
       // Set the number of current executors to zero.
       harness_graph_executor<InputType, OutputType>::current_executors = 0;
       // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
       harness_graph_executor<InputType, OutputType>::max_executors = lc;

       typedef tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > fnode_type;
       fnode_type exe_node( g, lc, body );

       for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< harness_counting_receiver<OutputType> > receivers(num_receivers, harness_counting_receiver<OutputType>(g));

#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
            ASSERT(exe_node.successor_count() == 0, NULL);
            ASSERT(exe_node.predecessor_count() == 0, NULL);
#endif

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, receivers[r] );
            }
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
            ASSERT(exe_node.successor_count() == num_receivers, NULL);
            typename fnode_type::successor_list_type my_succs;
            exe_node.copy_successors(my_succs);
            ASSERT(my_succs.size() == num_receivers, NULL);
            typename fnode_type::predecessor_list_type my_preds;
            exe_node.copy_predecessors(my_preds);
            ASSERT(my_preds.size() == 0, NULL);
#endif

            harness_counting_sender<InputType> *senders = NULL;

            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders = new harness_counting_sender<InputType>[num_senders];
                {
                    // Exclusively lock m to prevent exe_node from finishing
                    tbb::spin_rw_mutex::scoped_lock l( harness_graph_executor<InputType, OutputType>::template mutex_holder<tbb::spin_rw_mutex>::mutex );

                    // put to lc level, it will accept and then block at m
                    for ( size_t c = 0 ; c < lc ; ++c ) {
                        ASSERT( exe_node.try_put( InputType() ) == true, NULL );
                    }
                    // it only accepts to lc level
                    ASSERT( exe_node.try_put( InputType() ) == false, NULL );

                    for (size_t s = 0; s < num_senders; ++s ) {
                       // register a sender
                       senders[s].my_limit = N;
                       exe_node.register_predecessor( senders[s] );
                    }

                } // release lock at end of scope, setting the exe node free to continue
                // wait for graph to settle down
                g.wait_for_all();

                // confirm that each sender was requested from N times
                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s].my_received;
                    ASSERT( n == N, NULL );
                    ASSERT( senders[s].my_receiver == &exe_node, NULL );
                }
                // confirm that each receiver got N * num_senders + the initial lc puts
                for (size_t r = 0; r < num_receivers; ++r ) {
                    size_t n = receivers[r].my_count;
                    ASSERT( n == num_senders*N+lc, NULL );
                    receivers[r].my_count = 0;
                }
                delete [] senders;
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, receivers[r] );
            }
            ASSERT( exe_node.try_put( InputType() ) == true, NULL );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                ASSERT( int(receivers[r].my_count) == 0, NULL );
            }
        }
    }
}
Example #15
void buffered_levels_with_copy( size_t concurrency ) {

    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        inc_functor cf;
        cf.local_execute_count = Offset;
        global_execute_count = Offset;

        tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, cf );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

           std::vector< harness_mapped_receiver<OutputType>* > receivers(num_receivers);
           for (size_t i = 0; i < num_receivers; i++) {
               receivers[i] = new harness_mapped_receiver<OutputType>(g);
           }

           for (size_t r = 0; r < num_receivers; ++r ) {
               tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            harness_counting_sender<InputType> *senders = NULL;
            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders = new harness_counting_sender<InputType>[num_senders];
                for (size_t s = 0; s < num_senders; ++s ) {
                    senders[s].my_limit = N;
                    tbb::flow::make_edge( senders[s], exe_node );
                }

                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->initialize_map( N, num_senders );
                }

                NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                g.wait_for_all();

                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s].my_received;
                    ASSERT( n == N, NULL );
                    ASSERT( senders[s].my_receiver == &exe_node, NULL );
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->validate();
                }
                delete [] senders;
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
            ASSERT( exe_node.try_put( InputType() ) == true, NULL );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                receivers[r]->validate();
            }

            for (size_t i = 0; i < num_receivers; i++) {
                delete receivers[i];
            }
        }

        // validate that the local body matches the global execute_count and both are correct
        inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
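        // Sketch of where expected_count comes from: each of the MAX_NODES
        // num_receivers iterations feeds N * num_senders inputs for num_senders =
        // 1..MAX_NODES (N * MAX_NODES * (MAX_NODES + 1) / 2 in total) plus one extra
        // try_put after the edges are removed, and both counters start at Offset.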
        const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset;
        size_t global_count = global_execute_count;
        size_t inc_count = body_copy.local_execute_count;
        ASSERT( global_count == expected_count && global_count == inc_count, NULL );
        g.reset(tbb::flow::rf_reset_bodies);
        body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        inc_count = body_copy.local_execute_count;
        ASSERT( Offset == inc_count, "reset(rf_reset_bodies) did not reset functor" );
    }
}
Example #16
int EXECUTE(int cmdId, CmCParser *script, void **paramTable)
{
  switch(cmdId) {

    //***************************************************
    //set global flags
    //***************************************************

  case CMD_SYNERGISTIC: {
    CmCToken *token;
    token = script->GetToken(); //get flag
    if(!strcmp(token->token_, "ON")) {
      CmCSynergistic = true;
      CmCPrompt("Synergistic Segmentation ENABLED.\n");
    } else {
      CmCSynergistic = false;
      CmCPrompt("Synergistic Segmentation DISABLED.\n");
    }
    script->GetToken(); //skip ';'
    break;
  }

  case CMD_DISPLAY_PROGRESS: {
    CmCToken *token;
    token = script->GetToken(); //get flag
    if(!strcmp(token->token_, "ON")) {
      CmCDisplayProgress = true;
      CmCPrompt("Display progress ENABLED.\n");
    } else {
      CmCDisplayProgress = false;
      CmCPrompt("Display progress DISABLED.\n");
    }
    script->GetToken(); //skip ';'
    break;
  }

  case CMD_USE_CUSTOM_WEIGHT_MAP: {
    CmCToken *token;
    token = script->GetToken(); //get flag
    if(!strcmp(token->token_, "ON")) {
      CmCUseCustomWeightMap = true;
      CmCPrompt("Custum weight map IN-USE (if defined).\n");
    } else {
      CmCUseCustomWeightMap = false;
      CmCPrompt("Custum weight map IN-ACTIVE.\n");
    }
    script->GetToken(); //skip ';'
    break;
  }

    //***************************************************
    //load a file
    //***************************************************

  case CMD_LOAD: {
    CmCToken *token;
    script->GetToken(); //skip "('"
    script->GetToken();
    token = script->GetToken(); //get filename
    char *filename = new char[strlen(token->token_)+1];
    strcpy(filename, token->token_);
    script->GetToken(); //skip "',"
    script->GetToken();
    token = script->GetToken(); //get input type
    int inputtype = InputType(token->token_);
    script->GetToken(); //skip ");"
    script->GetToken();    

    //load file
    int error = edison.Load(filename, inputtype);
    if(!error) CmCPrompt("File '%s' has been successfully loaded!\n", filename);
    delete [] filename;
   
    //return any errors
    if(error) return error;
    
    break;
  }

    //***************************************************
    //save a file
    //***************************************************

  case CMD_SAVE: {
    CmCToken *token;
    script->GetToken(); //skip "('"
    script->GetToken();
    token = script->GetToken(); //get filename
    char *filename = new char [strlen(token->token_) + 1];
    strcpy(filename, token->token_);
    script->GetToken(); //skip "',"
    script->GetToken();
    token = script->GetToken(); //filetype
    int filetype = FileType(token->token_);
    script->GetToken(); //skip ','
    token = script->GetToken(); //get output type
    int outputtype = OutputType(token->token_);
    script->GetToken(); //skip ");"
    script->GetToken();
    
    //save file
    int error = edison.Save(filename, filetype, outputtype);
    if(!error) CmCPrompt("File '%s' has been successfully saved!\n", filename);
    delete [] filename;
    
    //return any errors
    if(error) return error;
    break;
  }

    //***************************************************
    //route output to input
    //***************************************************

  case CMD_USE_RESULT: {      
    CmCToken *token;
    script->GetToken(); //skip '('
    token = script->GetToken(); //get output type
    int outputtype = OutputType(token->token_);
    script->GetToken(); //skip ");"
    script->GetToken();

    //route output to input
    int error = edison.UseResult(outputtype);
    if(!error) {
      if(outputtype == OUTPUT_SEGM_IMAGE) {
	CmCPrompt("Segmented image result has been set as input.\n");
      } else {
	CmCPrompt("Filtered image result has been set as input.\n");
      }
    }      
    if(error) return error;
    break;
  }

    //***************************************************
    //edge detect the input image
    //***************************************************

  case CMD_EDGE_DETECT: {
    edison.SetParameters(paramTable);
    int error = edison.EdgeDetect();
    if(error) return error;
    script->GetToken(); //skip ';'
    break;
  }    

    //***************************************************
    //filter the input image
    //***************************************************

  case CMD_FILTER: {
    edison.SetParameters(paramTable);
    int error = edison.Filter();    
    if(error) return error;
    script->GetToken(); //skip ';'
    break;
  }

    //***************************************************
    //fuse the regions of the input image
    //***************************************************

  case CMD_FUSE: {    
    edison.SetParameters(paramTable);
    int error = edison.Fuse();
    if(error) return error;
    script->GetToken(); //skip ';'
    break;
  }
    //***************************************************
    //segment the input image
    //***************************************************

  case CMD_SEGMENT: {    
    edison.SetParameters(paramTable);
    int error = edison.Segment();
    if(error) return error;
    script->GetToken(); //skip ';'
    break;
  }

    //***************************************************
    //does nothing
    //***************************************************

  default:
    break;
  }

  //command executed successfully!
  return NO_ERRORS;

}    
void buffered_levels_with_copy( size_t concurrency ) {
    typedef typename std::tuple_element<0,OutputTuple>::type OutputType;
    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) { 
        tbb::flow::graph g;

        inc_functor cf;
        cf.local_execute_count = Offset;
        global_execute_count = Offset;
       
        tbb::flow::multifunction_node< InputType, OutputTuple > exe_node( g, lc, cf );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
           harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers];
           for (size_t r = 0; r < num_receivers; ++r ) {
               tbb::flow::make_edge( tbb::flow::output_port<0>(exe_node), receivers[r] );
            }

            harness_counting_sender<InputType> *senders = NULL;
            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders = new harness_counting_sender<InputType>[num_senders];
                for (size_t s = 0; s < num_senders; ++s ) {
                    senders[s].my_limit = N;
                    tbb::flow::make_edge( senders[s], exe_node );
                }

                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r].initialize_map( N, num_senders ); 
                }

                NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                g.wait_for_all();

                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s].my_received;
                    ASSERT( n == N, NULL ); 
                    ASSERT( senders[s].my_receiver == &exe_node, NULL );
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r].validate();
                }
                delete [] senders;
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_node), receivers[r] );
            }
            ASSERT( exe_node.try_put( InputType() ) == true, NULL );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                receivers[r].validate();
            }
            delete [] receivers;
        }

        // validate that the local body matches the global execute_count and both are correct
        inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset; 
        size_t global_count = global_execute_count;
        size_t inc_count = body_copy.local_execute_count;
        ASSERT( global_count == expected_count && global_count == inc_count, NULL ); 
    }
}
void buffered_levels( size_t concurrency, Body body ) {
    typedef typename std::tuple_element<0,OutputTuple>::type OutputType;
    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) { 
        tbb::flow::graph g;

        // Set the execute_counter back to zero in the harness
        harness_graph_multifunction_executor<InputType, OutputTuple,tbb::spin_mutex>::execute_count = 0;
        // Set the max allowed executors to lc.  There is a check in the functor to make sure this is never exceeded.
        harness_graph_multifunction_executor<InputType, OutputTuple,tbb::spin_mutex>::max_executors = lc;

        // Create the multifunction_node with the appropriate concurrency level, and use default buffering
        tbb::flow::multifunction_node< InputType, OutputTuple > exe_node( g, lc, body );
   
        //Create a vector of identical exe_nodes
        std::vector< tbb::flow::multifunction_node< InputType, OutputTuple > > exe_vec(2, exe_node);

        // exercise each of the copied nodes
        for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) {
            for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
                // Create num_receivers counting receivers and connect the exe_vec[node_idx] to them.
                harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers];
                for (size_t r = 0; r < num_receivers; ++r ) {
                    tbb::flow::make_edge( tbb::flow::output_port<0>(exe_vec[node_idx]), receivers[r] );
                }

                // Do the test with varying numbers of senders
                harness_counting_sender<InputType> *senders = NULL;
                for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                    // Create num_senders senders, set their message limit each to N, and connect them to the exe_vec[node_idx]
                    senders = new harness_counting_sender<InputType>[num_senders];
                    for (size_t s = 0; s < num_senders; ++s ) {
                        senders[s].my_limit = N;
                        tbb::flow::make_edge( senders[s], exe_vec[node_idx] );
                    }

                    // Initialize the receivers so they know how many senders and messages to check for
                    for (size_t r = 0; r < num_receivers; ++r ) {
                         receivers[r].initialize_map( N, num_senders ); 
                    }

                    // Do the test
                    NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                    g.wait_for_all();

                    // confirm that each sender was requested from N times
                    for (size_t s = 0; s < num_senders; ++s ) {
                        size_t n = senders[s].my_received;
                        ASSERT( n == N, NULL ); 
                        ASSERT( senders[s].my_receiver == &exe_vec[node_idx], NULL );
                    }
                    // validate the receivers
                    for (size_t r = 0; r < num_receivers; ++r ) {
                        receivers[r].validate();
                    }
                    delete [] senders;
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_vec[node_idx]), receivers[r] );
                }
                ASSERT( exe_vec[node_idx].try_put( InputType() ) == true, NULL );
                g.wait_for_all();
                for (size_t r = 0; r < num_receivers; ++r ) {
                    // since it's detached, nothing should have changed
                    receivers[r].validate();
                }
                delete [] receivers;
            }
        } 
    }
}
Example #19
/*
** This routine implements the Sizer() function for <INPUT>,
** <SELECT> and <TEXTAREA> markup.
**
** A side effect of sizing these markups is that widgets are
** created to represent the corresponding input controls.
**
** The function normally returns 0.  But if it is dealing with
** a <SELECT> or <TEXTAREA> that is incomplete, 1 is returned.
** In that case, the sizer will be called again at some point in
** the future when more information is available.
*/
int HtmlControlSize(HtmlWidget *htmlPtr, HtmlElement *pElem){
  char *zWin;            /* Name of child widget that implements this input */
  int incomplete = 0;    /* True if data is incomplete */
  Tcl_DString cmd;       /* The complete -formcommand callback */
 
  if( pElem->input.sized ) return 0;
  pElem->input.type = InputType(pElem);
  switch( pElem->input.type ){
    case INPUT_TYPE_Checkbox:
    case INPUT_TYPE_Hidden:
    case INPUT_TYPE_Image:
    case INPUT_TYPE_Radio:
    case INPUT_TYPE_Reset:
    case INPUT_TYPE_Submit:
    case INPUT_TYPE_Text:
    case INPUT_TYPE_Password:
    case INPUT_TYPE_File: {
      int result;
      char zToken[50];

      if( pElem->input.pForm==0 || htmlPtr->zFormCommand==0 
           || htmlPtr->zFormCommand[0]==0 ){
        EmptyInput(pElem);
        break;
      }
      Tcl_DStringInit(&cmd);
      Tcl_DStringAppend(&cmd, htmlPtr->zFormCommand, -1);
      sprintf(zToken," %d input ",pElem->input.pForm->form.id);
      Tcl_DStringAppend(&cmd, zToken, -1);
      pElem->input.cnt = ++htmlPtr->nInput;
      zWin = MakeWindowName(htmlPtr, pElem);
      Tcl_DStringAppend(&cmd, zWin, -1);
      Tcl_DStringStartSublist(&cmd);
      HtmlAppendArglist(&cmd, pElem);
      Tcl_DStringEndSublist(&cmd);
      HtmlLock(htmlPtr);
      result = Tcl_GlobalEval(htmlPtr->interp, Tcl_DStringValue(&cmd));
      Tcl_DStringFree(&cmd);
      if( !HtmlUnlock(htmlPtr) ){
        SizeAndLink(htmlPtr, zWin, pElem);
      }
      HtmlFree(zWin);
      break;
    }
    case INPUT_TYPE_Select: {
      int result;
      char zToken[50];

      if( pElem->input.pForm==0 || htmlPtr->zFormCommand==0 
           || htmlPtr->zFormCommand[0]==0 ){
        EmptyInput(pElem);
        break;
      }
      Tcl_DStringInit(&cmd);
      Tcl_DStringAppend(&cmd, htmlPtr->zFormCommand, -1);
      sprintf(zToken," %d select ",pElem->input.pForm->form.id);
      Tcl_DStringAppend(&cmd, zToken, -1);
      pElem->input.cnt = ++htmlPtr->nInput;
      zWin = MakeWindowName(htmlPtr, pElem);
      Tcl_DStringAppend(&cmd, zWin, -1);
      Tcl_DStringStartSublist(&cmd);
      HtmlAppendArglist(&cmd, pElem);
      Tcl_DStringEndSublist(&cmd);
      Tcl_DStringStartSublist(&cmd);
      AddSelectOptions(&cmd, pElem, pElem->input.pEnd);
      Tcl_DStringEndSublist(&cmd);
      HtmlLock(htmlPtr);
      result = Tcl_GlobalEval(htmlPtr->interp, Tcl_DStringValue(&cmd));
      Tcl_DStringFree(&cmd);
      if( !HtmlUnlock(htmlPtr) ){
        SizeAndLink(htmlPtr, zWin, pElem);
      }
      HtmlFree(zWin);
      break;
    }
    case INPUT_TYPE_TextArea: {
      int result;
      char zToken[50];

      if( pElem->input.pForm==0 || htmlPtr->zFormCommand==0 
           || htmlPtr->zFormCommand[0]==0 ){
        EmptyInput(pElem);
        break;
      }
      Tcl_DStringInit(&cmd);
      Tcl_DStringAppend(&cmd, htmlPtr->zFormCommand, -1);
      sprintf(zToken," %d textarea ",pElem->input.pForm->form.id);
      Tcl_DStringAppend(&cmd, zToken, -1);
      pElem->input.cnt = ++htmlPtr->nInput;
      zWin = MakeWindowName(htmlPtr, pElem);
      Tcl_DStringAppend(&cmd, zWin, -1);
      Tcl_DStringStartSublist(&cmd);
      HtmlAppendArglist(&cmd, pElem);
      Tcl_DStringEndSublist(&cmd);
      Tcl_DStringStartSublist(&cmd);
      HtmlAppendText(&cmd, pElem, pElem->input.pEnd);
      Tcl_DStringEndSublist(&cmd);
      HtmlLock(htmlPtr);
      result = Tcl_GlobalEval(htmlPtr->interp, Tcl_DStringValue(&cmd));
      Tcl_DStringFree(&cmd);
      if( !HtmlUnlock(htmlPtr) ){
        SizeAndLink(htmlPtr, zWin, pElem);
      }
      HtmlFree(zWin);
      break;
    }
    case INPUT_TYPE_Applet: {
      int result;

      if( htmlPtr->zAppletCommand==0 || htmlPtr->zAppletCommand[0]==0 ){
        EmptyInput(pElem);
        break;
      }
      Tcl_DStringInit(&cmd);
      Tcl_DStringAppend(&cmd, htmlPtr->zAppletCommand, -1);
      Tcl_DStringAppend(&cmd, " ", 1);
      pElem->input.cnt = ++htmlPtr->nInput;
      zWin = MakeWindowName(htmlPtr, pElem);
      Tcl_DStringAppend(&cmd, zWin, -1);
      Tcl_DStringStartSublist(&cmd);
      HtmlAppendArglist(&cmd, pElem);
      Tcl_DStringEndSublist(&cmd);
      HtmlLock(htmlPtr);
      result = Tcl_GlobalEval(htmlPtr->interp, Tcl_DStringValue(&cmd));
      Tcl_DStringFree(&cmd);
      if( !HtmlUnlock(htmlPtr) ){
        SizeAndLink(htmlPtr, zWin, pElem);
      }
      HtmlFree(zWin);
      break;
    }
    default: {
      CANT_HAPPEN;
      pElem->base.flags &= ~HTML_Visible;
      pElem->base.style.flags |= STY_Invisible;
      pElem->input.tkwin = 0;
      break;
    }
  }
  return incomplete;
}