Example #1
static util::StringVector convertActivationsToGraphemeLabels(matrix::Matrix&& activations,
    const model::Model& model)
{
    assert(activations.size().size() >= 3);

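    // Fold any extra trailing dimensions into the timestep dimension,
    // yielding a (graphemes x miniBatch x timesteps) view.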
    if(activations.size().size() > 3)
    {
        activations = reshape(activations,
            {activations.size().front(),
             activations.size()[1],
             activations.size().product() / (activations.size().front() * activations.size()[1])});
    }

    size_t timesteps     = activations.size()[2];
    size_t miniBatchSize = activations.size()[1];
    size_t graphemes     = activations.size()[0];

    util::StringVector labels;

    for(size_t miniBatch = 0; miniBatch < miniBatchSize; ++miniBatch)
    {
        std::string currentLabel;

        size_t currentGrapheme = graphemes; // sentinel: no valid grapheme index yet

        for(size_t timestep = 0; timestep < timesteps; ++timestep)
        {
            size_t maxGrapheme = 0;
            // lowest(), not min(): min() is the smallest positive double and
            // would mishandle all-negative activations
            double maxValue    = std::numeric_limits<double>::lowest();

            for(size_t grapheme = 0; grapheme < graphemes; ++grapheme)
            {
                if(activations(grapheme, miniBatch, timestep) >= maxValue)
                {
                    maxValue    = activations(grapheme, miniBatch, timestep);
                    maxGrapheme = grapheme;
                }
            }

            // Emit only when the argmax changes, collapsing consecutive repeats
            if(maxGrapheme != currentGrapheme)
            {
                currentGrapheme = maxGrapheme;
                auto newGrapheme = model.getOutputLabel(maxGrapheme);

                currentLabel.insert(currentLabel.end(), newGrapheme.begin(), newGrapheme.end());
            }

        }

        labels.push_back(currentLabel);
    }

    return labels;
}
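The core of Example #1 is the collapse rule: one output per run of identical argmax predictions. A minimal self-contained sketch of that rule, assuming plain std::vector inputs; collapseGraphemes, argmaxPerStep, and graphemeTable are illustrative names, not the library's API:

#include <cstddef>
#include <string>
#include <vector>

// Collapse consecutive repeats of the same argmax index into one grapheme.
std::string collapseGraphemes(const std::vector<std::size_t>& argmaxPerStep,
    const std::vector<std::string>& graphemeTable)
{
    std::string label;
    std::size_t current = graphemeTable.size(); // sentinel: nothing emitted yet

    for(std::size_t g : argmaxPerStep)
    {
        if(g != current) // emit only when the winning grapheme changes
        {
            current = g;
            label += graphemeTable[g];
        }
    }

    return label;
}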
Example #2
static util::StringVector convertActivationsToLabels(matrix::Matrix&& activations,
    const model::Model& model)
{
    if(model.hasAttribute("UsesGraphemes") && model.getAttribute<bool>("UsesGraphemes"))
    {
        return convertActivationsToGraphemeLabels(std::move(activations), model);
    }

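    // Flatten everything beyond the first dimension into the sample axis,
    // yielding a (columns x samples) view.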
    if(activations.size().size() > 2)
    {
        activations = reshape(activations,
            {activations.size().front(),
             activations.size().product() / activations.size().front()});
    }

    size_t samples = activations.size()[1];
    size_t columns = activations.size()[0];

    util::StringVector labels;

    for(size_t sample = 0; sample < samples; ++sample)
    {
        size_t maxColumn = 0;
        double maxValue  = std::numeric_limits<double>::lowest(); // not 0.0f: activations may all be negative

        for(size_t column = 0; column < columns; ++column)
        {
            if(activations(column, sample) >= maxValue)
            {
                maxValue  = activations(column, sample);
                maxColumn = column;
            }
        }

        labels.push_back(model.getOutputLabel(maxColumn));
    }

    return labels;
}
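The inner argmax loop in Example #2 can also be written with the standard library. One behavioral difference worth noting: the >= comparison above keeps the last maximum, while std::max_element keeps the first. A sketch over one sample's contiguous activations (argmax is a hypothetical helper, not part of the library):

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

// First-maximum argmax over one sample's activations.
std::size_t argmax(const std::vector<double>& activations)
{
    return static_cast<std::size_t>(std::distance(activations.begin(),
        std::max_element(activations.begin(), activations.end())));
}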
Example #3
SimTK::State StateInitializer::getOptimizedInitState()
{
	if(_n_Z == 0)
		return _initState;

	StateInitializerQP qp(this);

	qp.setNumParameters(_n_controls);

	PrintVector(_lower_bd_actuatorforces,"_lower_bd_actuatorforces",std::cout);
	PrintVector(_upper_bd_actuatorforces,"_upper_bd_actuatorforces",std::cout);

	qp.setParameterLimits(_lower_bd_actuatorforces,_upper_bd_actuatorforces);
	qp.setNumEqualityConstraints(_UDotRef.size());
	qp.setNumInequalityConstraints(0);

	//SimTK::Optimizer opt(qp,SimTK::CFSQP);
	SimTK::Optimizer opt(qp, SimTK::InteriorPoint);
	opt.setConvergenceTolerance(1e-4);
	opt.setMaxIterations(200);

	Vector result(_n_controls);
	result.setToZero();

	SimTK::Real f = 0.0;

	try{
		f = opt.optimize(result);
	}
	catch(const std::exception& ex)
	{
		std::cout<<ex.what()<<std::endl;
	}

	std::cout<<"Initial State Optimization Error: "<<f<<std::endl;

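	// Reconstruct a full state from the optimized actuator forces: first
	// solve for fiber lengths, then for activations consistent with them.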
	SimTK::State stateOpt = _initState;
	
	Vector muscleForces(_n_muscles);
	Vector result_forces = result.elementwiseMultiply(_opt_actuator_forces);

	muscleForces = result_forces.block(0,0,_n_muscles,1).getAsVector();
	Vector tol_lm(_n_muscles), tol_acts(_n_muscles);
	Vector muscleFiberLens(_n_muscles), activations(_n_muscles);
	
	tol_lm.setTo(1.0e-6);
	tol_acts.setTo(1.0e-3);

	PrintVector(result,"result",std::cout);

	rootSolveMuscleFiberLength(stateOpt,muscleForces,_lm_min,_lm_max,tol_lm,muscleFiberLens);

	ROCINForceController::setStateFiberLength(*_model,muscleFiberLens,stateOpt);

	Vector lm_dot(_n_muscles);
	lm_dot.setToZero();

	_model->getMultibodySystem().realize(_initState,SimTK::Stage::Dynamics);
	

	// Collect each muscle's lengthening speed at the initial state.
	int idx_musc = 0;
	for(int i = 0; i < _n_controls; i++)
	{
		Actuator& act = _model->getActuators().get(i);
		Muscle* m = dynamic_cast<Muscle*>(&act);
		if(m != NULL)
		{
			lm_dot[idx_musc] = m->getSpeed(_initState);
			idx_musc++;
		}
	}

	rootSolveActivations(stateOpt,muscleForces,muscleFiberLens,lm_dot,_acts_min,_acts_max,tol_acts,activations);

	ROCINForceController::setStateActivation(*_model,activations,stateOpt);	

	return stateOpt;
}
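Example #3 follows Simbody's standard optimization pattern: subclass SimTK::OptimizerSystem, construct a SimTK::Optimizer with an algorithm choice, and call optimize(). A minimal sketch of that pattern with a toy quadratic objective, assuming the Simbody headers are available; StateInitializerQP plugs the real dynamics into this same interface:

#include <Simbody.h>

// Toy OptimizerSystem: minimize (x0 - 1)^2 + (x1 + 2)^2 over two parameters.
class QuadraticObjective : public SimTK::OptimizerSystem {
public:
    QuadraticObjective() : SimTK::OptimizerSystem(2) {}

    int objectiveFunc(const SimTK::Vector& x, bool newParameters,
                      SimTK::Real& f) const override
    {
        f = (x[0] - 1.0)*(x[0] - 1.0) + (x[1] + 2.0)*(x[1] + 2.0);
        return 0;
    }
};

// Usage, mirroring getOptimizedInitState():
//   QuadraticObjective qp;
//   SimTK::Optimizer opt(qp, SimTK::InteriorPoint);
//   opt.useNumericalGradient(true); // no analytic gradientFunc provided
//   opt.setConvergenceTolerance(1e-4);
//   opt.setMaxIterations(200);
//   SimTK::Vector result(2); result.setToZero();
//   SimTK::Real f = opt.optimize(result); // result approaches (1, -2)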
Example #4
//---------------------------------------------------------------------------
// 8. True action of the node (if activate returned true)
void
NaPNTeacher::action ()
{
    unsigned    iLayer;
    unsigned    iInpLayer = pnn->get_nn_unit()->InputLayer();

    // Take the neural network state at the time of its feed-forward calculation
    NaNNUnit	pastNN;
    pnn->pop_nn(pastNN);

    if(is_verbose()){
      int	i;
      NaPrintLog("NaPNTeacher (%p, %s[%s]):\n  NN input: ",
		 this, name(), pastNN.GetInstance());
      for(i = 0; i < pastNN.Xinp0.dim(); ++i)
	NaPrintLog(" %g", pastNN.Xinp0[i]);
      if(errout.links() == 0){
	NaPrintLog("\n  NN target: ");
	for(i = 0; i < desout.data().dim(); ++i)
	  NaPrintLog(" %g", desout.data()[i]);
      }else{
	NaPrintLog("\n  NN error: ");
	for(i = 0; i < errout.data().dim(); ++i)
	  NaPrintLog(" %g", errout.data()[i]);
      }
      NaPrintLog("\n");
    }

    if(bLearn){
      // Let's calculate delta weight from the past NN
      bpe->AttachNN(&pastNN);

      // One more activation since the last update
      ++nLastUpdate;

      for(iLayer = pastNN.OutputLayer();
	  (int)iLayer >= (int)iInpLayer; --iLayer){
        if(pastNN.OutputLayer() == iLayer){
            // Output layer
            if(errout.links() == 0){
                // Input pack #1
                // Doesn't really need nnout since it's stored inside the NN
                bpe->DeltaRule(&desout.data()[0]);
            }else{
                // Input pack #2
                bpe->DeltaRule(&errout.data()[0], true);
            }
        }else{
            // Hidden layer
            bpe->DeltaRule(iLayer, iLayer + 1);
        }
      }// backward step

      // Compute error on input
      if(errinp.links() > 0){
        unsigned    iInput;
        NaVector    &einp = errinp.data();

        einp.init_zero();
        for(iInput = 0; iInput < einp.dim(); ++iInput){
	    // Or maybe += ?  I didn't notice a difference...
            einp[iInput] -= bpe->PartOfDeltaRule(iInpLayer, iInput);
        }
      }

      // Now let's forget the past NN state since changes should be
      // applied to the current neural net anyway
      bpe->DetachNN();

      // Autoupdate facility
      if(0 != nAutoUpdateFreq && nLastUpdate >= nAutoUpdateFreq){
	NaPrintLog("%s: Automatic update #%d of NN '%s' (%d sample)\n",
		   name(), iUpdateCounter,
		   bpe->nn().GetInstance()? bpe->nn().GetInstance(): "",
		   activations());
	update_nn();

	// Call procedure
	if(NULL != auProc)
	  (*auProc)(iUpdateCounter, pData);
      }// autoupdate
    }// if bLearn is on
}
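At the output layer the code applies the delta rule in one of two forms: from a target vector (desout) or from a precomputed error vector (errout). A library-free sketch of those two cases for a sigmoid output layer; every name here is illustrative, not NaPNTeacher's API:

#include <cstddef>
#include <vector>

// Derivative of the logistic sigmoid in terms of the activation y.
static double sigmoidDeriv(double y) { return y * (1.0 - y); }

// Output-layer deltas: from targets (error = target - output) or from a
// ready-made error vector, matching the desout/errout branches above.
std::vector<double> outputDeltas(const std::vector<double>& output,
                                 const std::vector<double>& targetOrError,
                                 bool inputIsError)
{
    std::vector<double> delta(output.size());
    for(std::size_t i = 0; i < output.size(); ++i)
    {
        double e = inputIsError ? targetOrError[i]
                                : (targetOrError[i] - output[i]);
        delta[i] = e * sigmoidDeriv(output[i]);
    }
    return delta;
}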