Example #1
0
    // Trains a single minibatch and fetches any requested output values.
    // Dispatches to the local or distributed implementation depending on how
    // this Trainer was configured, then pushes the latest loss/eval statistics
    // to the registered progress writers.
    bool Trainer::TrainMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, bool isSweepEndInArguments, std::unordered_map<Variable, ValuePtr>& outputsToFetch, const DeviceDescriptor& computeDevice /*= DeviceDescriptor::UseDefaultDevice()*/)
    {
#ifndef  CNTK_UWP
        // Scope-based profiler event covering the entire minibatch.
        auto profMinibatch = Microsoft::MSR::CNTK::ScopeProfile(Microsoft::MSR::CNTK::profilerEvtMainMinibatch);
#endif

        bool result;
        if (m_distributed)
            result = TrainDistributedMinibatch(arguments, outputsToFetch, isSweepEndInArguments, computeDevice);
        else
            result = TrainLocalMinibatch(arguments, outputsToFetch, isSweepEndInArguments, computeDevice);

        // TODO: exclude updating progress writers from profiling?
        UpdateTrainingProgress(m_prevMinibatchNumSamples, m_prevMinibatchAggregateTrainingLossValue,
                               m_prevMinibatchAggregateEvalCriterionValue, computeDevice);
        return result;
    }
Example #2
0
 // Convenience overload without a sweep-end flag: routes the minibatch to the
 // distributed or local training path based on the trainer's configuration.
 bool Trainer::TrainMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, std::unordered_map<Variable, ValuePtr>& outputsToFetch, const DeviceDescriptor& computeDevice /*= DeviceDescriptor::UseDefaultDevice()*/)
 {
     return m_distributed
         ? TrainDistributedMinibatch(arguments, outputsToFetch, computeDevice)
         : TrainLocalMinibatch(arguments, outputsToFetch, computeDevice);
 }