Example #1
    std::pair<ValuePtr, size_t> Evaluator::TestLocalMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, std::unordered_map<Variable, ValuePtr>& outputsToFetch, const DeviceDescriptor& computeDevice)
    {
        if (!m_aggregatedEvaluationFunction)
            InvalidArgument("Evaluator::TestMinibatch: Cannot test when no evaluation function was specified during construction.");

        if (arguments.empty()) // Empty minibatch, return 0.
        {
            auto zeroValue = MakeSharedObject<Value>(
                MakeSharedObject<NDArrayView>(
                    m_aggregatedEvaluationFunction->Output().GetDataType(),
                    m_aggregatedEvaluationFunction->Output().IsSparse() ? StorageFormat::SparseCSC : StorageFormat::Dense,
                    m_aggregatedEvaluationFunction->Output().Shape(), computeDevice));
            if(zeroValue->GetDataType() == DataType::Float)
                zeroValue->Data()->SetValue(0.0f);
            else
                zeroValue->Data()->SetValue(0.0);
            return std::make_pair(zeroValue, 0);
        }

        std::unordered_map<Variable, ValuePtr> outputs = { { m_aggregatedEvaluationFunction, nullptr }, { m_testSampleCountVar, nullptr } };
        outputs.insert(outputsToFetch.begin(), outputsToFetch.end());

        m_combinedEvalFunction->Forward(arguments, outputs, computeDevice);

        const ValuePtr& aggregateEvalCriterionValue = outputs[m_aggregatedEvaluationFunction];
        auto sampleCount = GetSampleCount(m_testSampleCountVar, outputs[m_testSampleCountVar]);

        // Copy back output values for requested variables only.
        for (auto& o : outputsToFetch)
            o.second = outputs[o.first];

        return make_pair(aggregateEvalCriterionValue, sampleCount);
    }
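A minimal calling sketch for the snippet above, assuming an evaluator instance, a populated arguments map and a compute device already exist; evaluator, arguments and computeDevice are hypothetical names, not part of the original example:

    std::unordered_map<Variable, ValuePtr> outputsToFetch; // left empty: no extra outputs requested
    auto evalResult = evaluator->TestLocalMinibatch(arguments, outputsToFetch, computeDevice);
    ValuePtr aggregateCriterion = evalResult.first;  // summed evaluation criterion for the minibatch
    size_t sampleCount = evalResult.second;          // 0 when the minibatch was empty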
Example #2
bool AudioQueueStreamOut::SetSamplePos( int64_t samplepos)
{
    // Reject positions beyond the end of the stream.
    if( samplepos > GetSampleCount() )
        return false;
    // Convert the requested sample position into the packet index to seek to.
    mInfo.m_SeekToPacket = samplepos / mInfo.mDataFormat.mFramesPerPacket;
    return true;
}
Example #3
ProfileSamplePtr ProfileThread::GetSample(int32 sampleIndex) const
{
	// Bounds-checked access: indices at or past the recorded sample count yield nullptr.
	if (sampleIndex < GetSampleCount())
	{
		return m_samples[sampleIndex];
	}
	else
	{
		return nullptr;
	}
}
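A hedged usage sketch for the accessor above, iterating over all recorded samples; the thread variable is a hypothetical ProfileThread pointer:

	for (int32 i = 0; i < thread->GetSampleCount(); ++i)
	{
		ProfileSamplePtr sample = thread->GetSample(i);
		// ... inspect the sample; nullptr is returned only for out-of-range indices ...
	}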
Example #4
    double Trainer::TestMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, const DeviceDescriptor& computeDevice /*= DeviceDescriptor::UseDefaultDevice()*/)
    {
        if (!m_aggregatedEvaluationFunction)
            InvalidArgument("Trainer::TestMinibatch: Cannot test when no evaluation function was specified during 'this' trainer's construction");

        // TODO: Should we refactor this code that is somewhat similar to the prologue of the TrainMinibatch function
        std::unordered_map<Variable, ValuePtr> outputs = { { m_aggregatedEvaluationFunction, nullptr }, { m_testSampleCountVar, nullptr } };
        m_combinedTrainingFunction->Forward(arguments, outputs, computeDevice);

        auto sampleCount = GetSampleCount(m_testSampleCountVar, outputs[m_testSampleCountVar]);
        return (GetScalarValue(outputs[m_aggregatedEvaluationFunction]) / sampleCount);
    }
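A short caller sketch for the snippet above; trainer and arguments are hypothetical names assumed to be set up elsewhere, and DeviceDescriptor::UseDefaultDevice() is the default shown in the signature comment:

    double avgCriterion = trainer->TestMinibatch(arguments, DeviceDescriptor::UseDefaultDevice());
    // The return value is the aggregated evaluation criterion divided by the minibatch sample count.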
Example #5
    void Trainer::ExecuteForwardBackward(const std::unordered_map<Variable, ValuePtr>& arguments, std::unordered_map<Variable, ValuePtr>& outputsToFetch, const DeviceDescriptor& computeDevice, std::unordered_map<Variable, ValuePtr>& parameterGradients)
    {
#ifndef  CNTK_UWP
        auto profForwardBackward = Microsoft::MSR::CNTK::ScopeProfile(Microsoft::MSR::CNTK::profilerEvtMainFB);
#endif
        std::unordered_map<Variable, ValuePtr> outputs = { { m_aggregatedLossFunction, nullptr }, { m_trainingSampleCountVar, nullptr } };
        if (m_aggregatedEvaluationFunction)
            outputs.insert({ m_aggregatedEvaluationFunction, nullptr });

        outputs.insert(outputsToFetch.begin(), outputsToFetch.end());

        auto backPropSate = m_combinedTrainingFunction->Forward(arguments, outputs, computeDevice, { m_aggregatedLossFunction }, m_modelParametersNotCoveredByLearners);
        m_prevMinibatchAggregateTrainingLossValue = outputs[m_aggregatedLossFunction];
        if (m_aggregatedEvaluationFunction)
            m_prevMinibatchAggregateEvalCriterionValue = outputs[m_aggregatedEvaluationFunction];

        for (auto outputToFetch : outputsToFetch)
        {
            if (outputToFetch.second == nullptr)
                outputsToFetch[outputToFetch.first] = outputs[outputToFetch.first];
        }

        if(!m_rootGradientValue ||
            m_aggregatedLossFunction->Output().GetDataType() != m_rootGradientValue->GetDataType() ||
            m_prevMinibatchAggregateTrainingLossValue->Shape() != m_rootGradientValue->Shape() ||
            computeDevice != m_rootGradientValue->Device() ||
            outputs.at(m_aggregatedLossFunction)->Mask() != m_rootGradientValue->Mask())
        {
            m_rootGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(m_aggregatedLossFunction->Output().GetDataType(), m_prevMinibatchAggregateTrainingLossValue->Shape(), computeDevice), outputs.at(m_aggregatedLossFunction)->Mask());
        }

        if (m_aggregatedLossFunction->Output().GetDataType() == DataType::Float)
            m_rootGradientValue->Data()->SetValue(1.0f);
        else if (m_aggregatedLossFunction->Output().GetDataType() == DataType::Double)
            m_rootGradientValue->Data()->SetValue(1.0);
        else
            m_rootGradientValue->Data()->SetValue(half(1.0));

        for (const auto& parameter : m_learnerParameters)
            parameterGradients[parameter] = nullptr;

        // TODO: Why Backward signature does not take Parameter instead of Variable for gradients?
        m_combinedTrainingFunction->Backward(backPropSate, { { m_aggregatedLossFunction, m_rootGradientValue } }, parameterGradients);
        m_prevMinibatchNumSamples = GetSampleCount(m_trainingSampleCountVar, outputs[m_trainingSampleCountVar]);
    }
Example #6
FText FBPProfilerStat::GetSamplesText() const
{
	// Format the raw sample count as display text.
	return FText::AsNumber(GetSampleCount());
}
Example #7
/*----------------------------------------------------------------------
|   AP4_SampleTable::GenerateStblAtom
+---------------------------------------------------------------------*/
AP4_Result 
AP4_SampleTable::GenerateStblAtom(AP4_ContainerAtom*& stbl)
{
    // create the stbl container
    stbl = new AP4_ContainerAtom(AP4_ATOM_TYPE_STBL);

    // create the stsd atom
    AP4_StsdAtom* stsd = new AP4_StsdAtom(this);

    // create the stsz atom
    AP4_StszAtom* stsz = new AP4_StszAtom();

    // create the stsc atom
    AP4_StscAtom* stsc = new AP4_StscAtom();

    // create the stts atom
    AP4_SttsAtom* stts = new AP4_SttsAtom();

    // create the stss atom
    AP4_StssAtom* stss = new AP4_StssAtom();
    
    // declare the ctts atom (may be created later)
    AP4_CttsAtom* ctts = NULL;
    
    // start chunk table
    AP4_Ordinal             current_chunk_index              = 0;
    AP4_Size                current_chunk_size               = 0;
    AP4_Position            current_chunk_offset             = 0;
    AP4_Cardinal            current_samples_in_chunk         = 0;
    AP4_Ordinal             current_sample_description_index = 0;
    AP4_UI32                current_duration                 = 0;
    AP4_Cardinal            current_duration_run             = 0;
    AP4_UI32                current_cts_delta                = 0;
    AP4_Cardinal            current_cts_delta_run            = 0;
    AP4_Array<AP4_Position> chunk_offsets;

    // process all the samples
    bool         all_samples_are_sync = false;
    AP4_Cardinal sample_count = GetSampleCount();
    for (AP4_Ordinal i=0; i<sample_count; i++) {
        AP4_Sample sample;
        GetSample(i, sample);
        
        // update DTS table
        AP4_UI32 new_duration = sample.GetDuration();
        if (new_duration != current_duration && current_duration_run != 0) {
            // emit a new stts entry
            stts->AddEntry(current_duration_run, current_duration);
            
            // reset the run count
            current_duration_run = 0;
        } 
        ++current_duration_run;
        current_duration = new_duration;
        
        // update CTS table
        AP4_UI32 new_cts_delta = sample.GetCtsDelta();
        if (new_cts_delta != current_cts_delta && current_cts_delta_run != 0) {
            // create a ctts atom if we don't have one
            if (ctts == NULL) ctts = new AP4_CttsAtom();
            
            //emit a new ctts entry
            ctts->AddEntry(current_cts_delta_run, current_cts_delta);
            
            // reset the run count
            current_cts_delta_run = 0;
        }
        ++current_cts_delta_run;
        current_cts_delta = new_cts_delta;
        
        // add an entry into the stsz atom
        stsz->AddEntry(sample.GetSize());
        
        // update the sync sample table
        if (sample.IsSync()) {
            stss->AddEntry(i+1);
            if (i==0) all_samples_are_sync = true;
        } else {
            all_samples_are_sync = false;
        }
        
        // see in which chunk this sample is
        AP4_Ordinal chunk_index = 0;
        AP4_Ordinal position_in_chunk = 0;
        AP4_Result  result = GetSampleChunkPosition(i, chunk_index, position_in_chunk);
        if (AP4_SUCCEEDED(result)) {
            if (chunk_index != current_chunk_index && current_samples_in_chunk != 0) {
                // new chunk
                chunk_offsets.Append(current_chunk_offset);
                current_chunk_offset += current_chunk_size;

                stsc->AddEntry(1, 
                               current_samples_in_chunk,
                               current_sample_description_index+1);

                current_samples_in_chunk = 0;
                current_chunk_size       = 0;
            }
            current_chunk_index = chunk_index;
        }

        // store the sample description index
        current_sample_description_index = sample.GetDescriptionIndex();
                
        // adjust the current chunk info
        current_chunk_size += sample.GetSize();
        ++current_samples_in_chunk;        
    }

    // finish the stts table
    if (sample_count) stts->AddEntry(current_duration_run, current_duration);

    // finish the ctts table if we have one
    if (ctts) {
        AP4_ASSERT(current_cts_delta_run != 0);
        
        // add a ctts entry
        ctts->AddEntry(current_cts_delta_run, current_cts_delta);
    } 
    
    // process any unfinished chunk
    if (current_samples_in_chunk != 0) {
        // new chunk
        chunk_offsets.Append(current_chunk_offset);
        stsc->AddEntry(1, 
                       current_samples_in_chunk,
                       current_sample_description_index+1);
    }

    // attach the children of stbl
    stbl->AddChild(stsd);
    stbl->AddChild(stsz);
    stbl->AddChild(stsc);
    stbl->AddChild(stts);
    if (ctts) stbl->AddChild(ctts);
    if (!all_samples_are_sync && stss->GetEntries().ItemCount() != 0) {
        stbl->AddChild(stss);
    } else {
        delete stss;
    }
    
    // see if we need a co64 or an stco atom
    AP4_Size  chunk_count = chunk_offsets.ItemCount();
    if (current_chunk_offset <= 0xFFFFFFFF) {
        // make an array of 32-bit entries
        AP4_UI32* chunk_offsets_32 = new AP4_UI32[chunk_count];
        for (unsigned int i=0; i<chunk_count; i++) {
            chunk_offsets_32[i] = (AP4_UI32)chunk_offsets[i];
        }
        // create the stco atom
        AP4_StcoAtom* stco = new AP4_StcoAtom(&chunk_offsets_32[0], chunk_count);
        stbl->AddChild(stco);

        delete[] chunk_offsets_32;
    } else {
        // create the co64 atom
        AP4_Co64Atom* co64 = new AP4_Co64Atom(&chunk_offsets[0], chunk_count);
        stbl->AddChild(co64);
    }


    return AP4_SUCCESS;
}
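A hedged sketch of how the generator above might be invoked; sample_table is a hypothetical AP4_SampleTable pointer, and error handling is reduced to the AP4_SUCCEEDED check already used inside the function:

    AP4_ContainerAtom* stbl = NULL;
    AP4_Result result = sample_table->GenerateStblAtom(stbl);
    if (AP4_SUCCEEDED(result)) {
        // stbl now holds the stsd/stsz/stsc/stts children (plus ctts, stss and stco/co64 when applicable)
    }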
Example #8
int CWaveFile::UpdateHeader( void )
/////////////////////////////////////////////////////////////////////////////
{
	MMRESULT Result;

	// ascend out of the data subchunk
	Result = Ascend( &m_mmckinfoSubchunk );
	if( Result )
		DPF(("UpdateHeader: Ascend data subchunk failed!\n"));
	//DPF(("fccType [%08lx] %lu\n", m_mmckinfoSubchunk.fccType, m_mmckinfoSubchunk.cksize ));

	// ascend out of the WAVE chunk, which automatically updates the cksize field
	Result = Ascend( &m_mmckinfoParent );
	if( Result )
		DPF(("UpdateHeader: Ascend WAVE chunk failed %ld!\n", Result ));
	//DPF(("fccType [%08lx] %lu\n", m_mmckinfoParent.fccType, m_mmckinfoParent.cksize ));

	//MMSYSERR_INVALPARAM 
	// force write to disk
	Result = Flush( MMIO_EMPTYBUF );
	if( Result )
		DPF(("UpdateHeader: Flush failed!\n"));

	// if a compressed format, then update the fact chunk
	if( m_FormatEx.wFormatTag != WAVE_FORMAT_PCM )
	{
		DWORD		dwSampleLength;
		MMCKINFO	mmckinfoFact;

		// move to start of file
		SeekBegin( 0 );

		// find the WAVE chunk
		m_mmckinfoParent.fccType = mmioFOURCC('W','A','V','E');
		if( Descend( (LPMMCKINFO)&m_mmckinfoParent, NULL, MMIO_FINDRIFF ) )
		{
			Close();
			return( FALSE );
		}

		// Now, find the fact chunk 
		mmckinfoFact.ckid = mmioFOURCC('f','a','c','t');
		if( Descend( &mmckinfoFact, &m_mmckinfoParent, MMIO_FINDCHUNK ) )
		{
			Close();
			return( FALSE );
		}

		// compute the number of samples
		dwSampleLength = GetSampleCount();

		// write the fact chunk
		if( Write( (HPSTR)&dwSampleLength, sizeof( DWORD )) != (LONG)sizeof( DWORD ) )
		{
			Close();
			return( FALSE );
		}

		// Ascend out of the fact subchunk.
		if( Ascend( &mmckinfoFact ) )
			DPF(("Ascend Failed\n"));
		// Ascend out of the WAVE chunk.
		if( Ascend( &m_mmckinfoParent ) )
			DPF(("Ascend Failed\n"));
	}

	return( TRUE );
}
Example #9
    bool Trainer::TrainMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, std::unordered_map<Variable, ValuePtr>& outputsToFetch, const DeviceDescriptor& computeDevice /*= DeviceDescriptor::UseDefaultDevice()*/)
    {
        std::unordered_map<Variable, ValuePtr> outputs = { { m_aggregatedLossFunction, nullptr }, { m_trainingSampleCountVar, nullptr } };
        if (m_aggregatedEvaluationFunction)
            outputs.insert({ m_aggregatedEvaluationFunction, nullptr });

        outputs.insert(outputsToFetch.begin(), outputsToFetch.end());

        if (m_distributedTrainer)
            m_distributedTrainer->PreMinibatchCallback(*this);

        auto backPropSate = m_combinedTrainingFunction->Forward(arguments, outputs, computeDevice, { m_aggregatedLossFunction });
        m_prevMinibatchAggregateTrainingLossValue = outputs[m_aggregatedLossFunction];
        if (m_aggregatedEvaluationFunction)
            m_prevMinibatchAggregateEvalCriterionValue = outputs[m_aggregatedEvaluationFunction];

        for (auto outputToFetch : outputsToFetch)
        {
            if (outputToFetch.second == nullptr)
                outputsToFetch[outputToFetch.first] = outputs[outputToFetch.first];
        }

        ValuePtr rootGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(m_aggregatedLossFunction->Output().GetDataType(), m_prevMinibatchAggregateTrainingLossValue->Shape(), computeDevice), outputs.at(m_aggregatedLossFunction)->Mask());
        if (m_aggregatedLossFunction->Output().GetDataType() == DataType::Float)
            rootGradientValue->Data()->SetValue(1.0f);
        else
            rootGradientValue->Data()->SetValue(1.0);

        auto modelParameters = m_combinedTrainingFunction->Parameters();
        std::unordered_map<Variable, ValuePtr> parameterGradients;
        for (const auto& parameter : modelParameters)
        {
            parameterGradients[parameter] = nullptr;
        }

        m_combinedTrainingFunction->Backward(backPropSate, { { m_aggregatedLossFunction, rootGradientValue } }, parameterGradients);

        m_prevMinibatchNumSamples = GetSampleCount(m_trainingSampleCountVar, outputs[m_trainingSampleCountVar]);

        bool endOfData = m_prevMinibatchNumSamples == 0;
        if (m_distributedTrainer)
        {
            // Aggregation should happen in the same order, the order of parameters is guaranteed to be the same.
            std::vector<std::pair<Parameter, NDArrayViewPtr>> gradients;
            gradients.reserve(modelParameters.size());
            for (const auto& parameter : modelParameters)
                gradients.push_back(std::make_pair(parameter, parameterGradients[parameter]->Data()));

            MinibatchInfo info
            {
                arguments.empty(),
                m_prevMinibatchNumSamples,
                m_prevMinibatchAggregateTrainingLossValue->Data(),
                m_prevMinibatchAggregateEvalCriterionValue->Data()
            };

            endOfData = m_distributedTrainer->PreParameterUpdateCallback(*this, gradients, info);
            m_prevMinibatchNumSamples = info.numberOfSamples;
        }

        bool anyUpdatesPerformed = false;
        for (auto learner : m_parameterLearners)
        {
            std::unordered_map<Parameter, NDArrayViewPtr> learnerParameterGradients;
            const auto& learnerParameters = learner->Parameters();
            for (const auto& parameter : learnerParameters)
            {
                learnerParameterGradients[parameter] = parameterGradients[parameter]->Data();

                if (parameterGradients[parameter]->Mask())
                    LogicError("The gradient value for a Parameter cannot have an associated mask!");
            }

            anyUpdatesPerformed |= learner->Update(learnerParameterGradients, m_prevMinibatchNumSamples);
        }

        return anyUpdatesPerformed && !endOfData;
    }
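A minimal training-loop sketch around the snippet above; trainer, arguments and outputsToFetch are hypothetical names for state assumed to exist, and the loop stops when TrainMinibatch reports no update or end of data (the return value computed at the bottom of the example):

    std::unordered_map<Variable, ValuePtr> outputsToFetch;
    while (trainer->TrainMinibatch(arguments, outputsToFetch, DeviceDescriptor::UseDefaultDevice()))
    {
        // refill 'arguments' with the next minibatch before the next iteration
    }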
Example #10
int64_t AudioQueueStreamOut::GetLengthMs()
{
    // Stream length in milliseconds: total samples divided by the sample rate, times 1000.
    return (int64_t) ((double)GetSampleCount() * 1000.0 / (double)mInfo.mDataFormat.mSampleRate );
}
Example #11
/*----------------------------------------------------------------------
|       AP4_SampleTable::GenerateStblAtom
+---------------------------------------------------------------------*/
AP4_Result 
AP4_SampleTable::GenerateStblAtom(AP4_ContainerAtom*& stbl)
{
    // create the stbl container
    stbl = DNew AP4_ContainerAtom(AP4_ATOM_TYPE_STBL);

    // create the stsd atom
    AP4_StsdAtom* stsd = DNew AP4_StsdAtom(this);

    // create the stsz atom
    AP4_StszAtom* stsz = DNew AP4_StszAtom();

    // create the stsc atom
    AP4_StscAtom* stsc = DNew AP4_StscAtom();

    // start chunk table
    AP4_Cardinal        samples_in_chunk = 0;
    AP4_Offset          current_chunk_offset = 0;
    AP4_Size            current_chunk_size = 0;
    AP4_Array<AP4_UI32> chunk_offsets;

    // process all the samples
    AP4_Cardinal sample_count = GetSampleCount();
    for (AP4_Ordinal i=0; i<sample_count; i++) {
        AP4_Sample sample;
        GetSample(i, sample);
        
        // add an entry into the stsz atom
        stsz->AddEntry(sample.GetSize());
        
        // adjust the current chunk info
        current_chunk_size += sample.GetSize();

        // count the sample
        samples_in_chunk++;
        if (samples_in_chunk == 10) {
            // new chunk
            chunk_offsets.Append(current_chunk_offset);
            stsc->AddEntry(1, 10, 1);
            samples_in_chunk = 0;

            // adjust the chunk offset
            current_chunk_offset += current_chunk_size;
            current_chunk_size = 0;
        }
    }

    // process any unfinished chunk
    if (samples_in_chunk != 0) {
        // new chunk
        chunk_offsets.Append(current_chunk_offset);
        stsc->AddEntry(1, samples_in_chunk, 1);
    }

    // create the stco atom
    AP4_StcoAtom* stco = DNew AP4_StcoAtom(&chunk_offsets[0], 
                                          chunk_offsets.ItemCount());

    // create the stts atom (for now, we assume samples of equal duration)
    AP4_SttsAtom* stts = DNew AP4_SttsAtom();
    stts->AddEntry(sample_count, 1000); // FIXME

    // attach the children of stbl
    stbl->AddChild(stsd);
    stbl->AddChild(stsz);
    stbl->AddChild(stsc);
    stbl->AddChild(stco);
    stbl->AddChild(stts);

    return AP4_SUCCESS;
}