// Returns the evaluation criterion averaged over the samples of the most
// recently processed minibatch. An evaluation function must have been
// supplied when this trainer was constructed; otherwise this raises an error.
double Trainer::PreviousMinibatchEvaluationAverage() const
{
    if (!m_evaluationFunction)
        InvalidArgument("Trainer::PreviousMinibatchEvaluationAverage: Cannot get evaluation criterion value when no evaluation function was specified during 'this' trainer's construction");

    auto aggregateCriterion = GetScalarValue(m_prevMinibatchAggregateEvalCriterionValue);
    return aggregateCriterion / m_prevMinibatchNumSamples;
}
// Appends the Lua value at stack position 'level' to S: tables are
// converted via TableCrawler into a LuaTable, any other value is
// appended as a scalar.
void CrawlStack(lua_State* L, LuaStack& S, int level)
{
    if (lua_type(L, level) != LUA_TTABLE)
    {
        S.Append(GetScalarValue(L, level));
        return;
    }

    TableCrawler crawler(L, level);
    boost::shared_ptr<LuaTable> table = crawler.GetTable();
    S.Append(table);
}
double Trainer::TestMinibatch(const std::unordered_map<Variable, ValuePtr>& arguments, const DeviceDescriptor& computeDevice /*= DeviceDescriptor::UseDefaultDevice()*/) { if (!m_aggregatedEvaluationFunction) InvalidArgument("Trainer::TestMinibatch: Cannot test when no evaluation function was specified during 'this' trainer's construction"); // TODO: Should we refactor this code that is somewhat similar to the prologue of the TrainMinibatch function std::unordered_map<Variable, ValuePtr> outputs = { { m_aggregatedEvaluationFunction, nullptr }, { m_testSampleCountVar, nullptr } }; m_combinedTrainingFunction->Forward(arguments, outputs, computeDevice); auto sampleCount = GetSampleCount(m_testSampleCountVar, outputs[m_testSampleCountVar]); return (GetScalarValue(outputs[m_aggregatedEvaluationFunction]) / sampleCount); }
// Crawls the entire Lua stack of L, appending every value (from the bottom
// of the stack, index 1, up to the top) onto S.
//
// The per-level table/scalar handling is delegated to the single-level
// CrawlStack(L, S, level) overload instead of duplicating its body here.
void CrawlStack(lua_State* L, LuaStack& S)
{
    int top = lua_gettop(L);
    for (int i = 1; i <= top; i++) /* repeat for each level */
        CrawlStack(L, S, i);
}
double Trainer::PreviousMinibatchLossAverage() const { return (GetScalarValue(m_prevMinibatchAggregateTrainingLossValue) / m_prevMinibatchNumSamples); }