Example #1
void Net::Train(const mxArray *mx_data, const mxArray *mx_labels) {  
  
  //mexPrintMsg("Start training...");  
  ReadData(mx_data);
  ReadLabels(mx_labels);
  InitNorm();
  
  std::srand(params_.seed_);  
  
  size_t train_num = labels_.size1();
  size_t numbatches = (size_t) ceil((ftype) train_num/params_.batchsize_);
  trainerror_.resize(params_.numepochs_, numbatches);
  for (size_t epoch = 0; epoch < params_.numepochs_; ++epoch) {    
    std::vector<size_t> randind(train_num);
    for (size_t i = 0; i < train_num; ++i) {
      randind[i] = i;
    }
    if (params_.shuffle_) {
      std::random_shuffle(randind.begin(), randind.end());
    }
    std::vector<size_t>::const_iterator iter = randind.begin();
    for (size_t batch = 0; batch < numbatches; ++batch) {
      size_t batchsize = std::min(params_.batchsize_, (size_t)(randind.end() - iter));
      std::vector<size_t> batch_ind = std::vector<size_t>(iter, iter + batchsize);
      iter = iter + batchsize;      
      Mat data_batch = SubMat(data_, batch_ind, 1);      
      Mat labels_batch = SubMat(labels_, batch_ind, 1);      
      UpdateWeights(epoch, false);      
      InitActiv(data_batch);
      Mat pred_batch;
      Forward(pred_batch, 1);      
      InitDeriv(labels_batch, trainerror_(epoch, batch));
      Backward();
      CalcWeights();      
      UpdateWeights(epoch, true); 
      if (params_.verbose_ == 2) {
        std::string info = std::string("Epoch: ") + std::to_string(epoch+1) +
                           std::string(", batch: ") + std::to_string(batch+1);
        mexPrintMsg(info);
      }      
    } // batch    
    if (params_.verbose_ == 1) {
      std::string info = std::string("Epoch: ") + std::to_string(epoch+1);
      mexPrintMsg(info);
    }
  } // epoch
  //mexPrintMsg("Training finished");
}
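Note: the std::srand / std::random_shuffle pair used above was deprecated in C++14 and removed in C++17. A minimal sketch of the same shuffled-index construction with std::shuffle and an explicitly seeded engine, reusing the seed from the snippet (the helper name is an assumption for illustration):

#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

// Builds the identity permutation 0..train_num-1, then shuffles it with a
// deterministic engine so runs are reproducible for a fixed seed.
std::vector<size_t> MakeShuffledIndices(size_t train_num, unsigned seed) {
  std::vector<size_t> randind(train_num);
  std::iota(randind.begin(), randind.end(), size_t{0});
  std::mt19937 gen(seed);  // replaces the std::srand / std::random_shuffle pair
  std::shuffle(randind.begin(), randind.end(), gen);
  return randind;
}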
Example #2
         HError(999,"Can only update linear transforms OR model parameters!");
      xfInfo.useOutXForm = TRUE;
      /* This initialises things - temporary hack - THINK!! */
      CreateAdaptXForm(hset, "tmp");
   }

   /* initialise and pass information to the forward backward library */
   InitialiseForBack(fbInfo, x, hset, uFlags, pruneInit, pruneInc,
                     pruneLim, minFrwdP);

   if (parMode != 0) {
      ConvLogWt(hset);
   }

   /* 2-model reestimation */
   if (al_hmmUsed){
       if (trace&T_TOP)
           printf("2-model re-estimation enabled\n");
       /* load alignment HMM set */
       CreateHMMSet(&al_hset,&hmmStack,TRUE);
       xfInfo.al_hset = &al_hset;
       if (xfInfo.alXFormExt == NULL) xfInfo.alXFormExt = xfInfo.inXFormExt;
       /* load multiple MMFs */
       if (strlen(al_hmmMMF) > 0 ) {
           char *p,*q;
           Boolean eos;
           p=q=al_hmmMMF;
           for(;;) {
               eos = (*p=='\0');
               if ( ( isspace((int) *p) || *p == '\0' ) && (q!=p) ) {
Example #3
/**
 * Function that takes two vectors, one for the inputs and one for the outputs, and trains one epoch:
 */
int BKPNeuralNet::Train( int size, float *entry, int size2, float *out, float l_rate, float momentum )
{
  /* Insert the input values: */
  if( SetEntry( size, entry ) == -1 )
    return -1;

  /* Forward pass: compute the current (still untrained) output values: */
  ActivAll();

  /* Train the layer preceding the output layer: */
  if( momentum != 0 )
    OutGradientMtm( out, size2, momentum );
  else
    OutGradient( out, size2 );

  /* Back-propagate through each hidden layer: */
  for( int i = _layers - 2; i > 0; i-- )
  {
    if( momentum != 0 )
      HiddenGradientMtm( i, momentum );
    else
      HiddenGradient( i );
  }

  /* Update the weights: */
  UpdateWeights( l_rate );

  return 0;
}
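A hypothetical call site for Train, looping over one (input, target) pair for a fixed number of epochs. BKPNeuralNet's constructor is not shown in the snippet, so the net is passed in by reference; the sizes, learning rate, and momentum below are illustrative assumptions:

// Hypothetical driver: repeatedly trains a single sample.
// Train returns -1 if the input size does not match the entry layer.
void TrainSingleSample(BKPNeuralNet &net) {
  float entry[2]  = {0.0f, 1.0f};  // one input vector
  float target[1] = {1.0f};        // its desired output
  for (int epoch = 0; epoch < 1000; ++epoch) {
    if (net.Train(2, entry, 1, target, 0.3f /*l_rate*/, 0.9f /*momentum*/) == -1)
      break;  // input size rejected by SetEntry
  }
}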
Example #4
void Net::Train(const mxArray *mx_data, const mxArray *mx_labels) {  

  //mexPrintMsg("Start training...");  
  ReadData(mx_data);
  ReadLabels(mx_labels);
  InitNorm();
  
  size_t train_num = data_.size1();
  size_t numbatches = DIVUP(train_num, params_.batchsize_);
  trainerrors_.resize(params_.epochs_, 2);
  trainerrors_.assign(0);
  for (size_t epoch = 0; epoch < params_.epochs_; ++epoch) {    
    if (params_.shuffle_) {
      Shuffle(data_, labels_);      
    }
    StartTimer();    
    size_t offset = 0;
    Mat data_batch, labels_batch, pred_batch;      
    for (size_t batch = 0; batch < numbatches; ++batch) {
      size_t batchsize = MIN(train_num - offset, params_.batchsize_);      
      UpdateWeights(epoch, false);
      data_batch.resize(batchsize, data_.size2());
      labels_batch.resize(batchsize, labels_.size2());      
      SubSet(data_, data_batch, offset, true);      
      SubSet(labels_, labels_batch, offset, true);
      ftype error1;      
      InitActiv(data_batch);
      Forward(pred_batch, 1);            
      InitDeriv(labels_batch, error1);
      trainerrors_(epoch, 0) += error1;
      Backward();
      UpdateWeights(epoch, true); 
      offset += batchsize;
      if (params_.verbose_ == 2) {
        mexPrintInt("Epoch", (int) epoch + 1);
        mexPrintInt("Batch", (int) batch + 1);
      }
    } // batch  
    MeasureTime("totaltime");
    if (params_.verbose_ == 1) {
      mexPrintInt("Epoch", (int) epoch + 1);
    }        
  } // epoch  
  trainerrors_ /= (ftype) numbatches;
  //mexPrintMsg("Training finished");
}
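DIVUP is not defined in the snippet; it presumably performs the same ceiling division that Example #1 computes with ceil((ftype) train_num / params_.batchsize_). A minimal sketch of the usual integer-only definition (an assumption, not necessarily the project's actual macro):

// Ceiling division for non-negative integers, e.g. DIVUP(7, 3) == 3.
#define DIVUP(a, b) (((a) + (b) - 1) / (b))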
Example #5
/* EXPORT->MAPUpdateModels: update all models and save them in newDir if set,
   new files have newExt if set */
void MAPUpdateModels(HMMSet *hset, UPDSet uFlags)
{
  HMMScanState hss;
  HLink hmm;
  int px,nmapped=0,totM;
  long n;

  if (hset->logWt == TRUE) HError(999,"HMap: requires linear weights");

  /* Initialise a few global variables */
  SetVFloor( hset, vFloor, minVar);
  maxM = MaxMixInSet(hset);
  totM = TotMixInSet(hset);
  S = hset->swidth[0];

  if (hset->hsKind == TIEDHS){ /* TIEDHS - update mu & var once per HMMSet */
    HError(999,"TIEDHS kind not currently supported in MAP estimation");
  }

  NewHMMScan(hset,&hss);
  px=1;
  do {   
    hmm = hss.hmm;
    n = (long)hmm->hook;
    if (n<minEgs && !(trace&T_UPD))
      HError(-2331,"UpdateModels: %s[%d] copied: only %d egs\n",
	     HMMPhysName(hset,hmm),px,n);
    if (n>=minEgs && n>0) {
      if (uFlags & UPTRANS)
	HError(999,"No support for MAP updating transition probabilities");
      if (maxM>1 && uFlags & UPMIXES)
	UpdateWeights(hset,px,hmm);
      if (hset->hsKind != TIEDHS){
	if (uFlags & UPVARS)
	  UpdateVars(hset,px,hmm);
	if (uFlags & UPMEANS)
	  nmapped += UpdateMeans(hset,px,hmm);
	if (uFlags & (UPMEANS|UPVARS))
	  FixGConsts(hmm);
      }  
    }
    px++;
  } while (GoNextHMM(&hss));
  EndHMMScan(&hss);
  if (trace&T_TOP) {
    printf("Observed components (means) %d of %d: %.2f\n",nmapped,totM,100*(float)nmapped/(float)totM);
    if (nFloorVar > 0)
      printf("Total %d floored variance elements in %d different mixes\n",
	     nFloorVar,nFloorVarMix);
    fflush(stdout);
  }
   
  /* Reset vfloor */
  ResetVFloor(hset,vFloor);
}
Example #6
    void AllocationMW::RunAllocationMW(int num_iterations) {
        cout << "Running allocation LP MW algorithm \n";
        for (int t = 1; t <= num_iterations; ++t) {
            cout << "Entering iteration " << t << "\n";

            // Get primal solution.
            double t1, t2;
            float diff;
            t1 = omp_get_wtime();
            global_problem_.ConstructPrimal(t);
            t2 = omp_get_wtime();
            diff = t2-t1;
            cout << "execution time of relaxation computation was " << diff << "\n";

            t1 = clock();
            //Instance::UpdateAvgPrimal(t, solution_);
            t2 = clock();
            diff = (float)((t2 - t1) / CLOCKS_PER_SEC);  // clock() returns ticks, not seconds
            cout << "execution time of average primal update was " << diff << "\n";

            // Runs CPLEX for debugging only, should be turned off.
            // VerifySolution();

            // Calculate slacks, update averages and recalculate weights.
            t1 = omp_get_wtime();
            CalculateSlacks();
            UpdateAvgSlacks(t);
            t2 = omp_get_wtime();
            diff = t2-t1;
            cout << "execution time of slack update was " << diff << "\n";

            // Report the worst infeasibility so far.
            t1 = omp_get_wtime();
            ReportWorstInfeasibility(t);
            t2 = omp_get_wtime();
            diff = t2-t1;
            cout << "execution time of worst infeasibility update was " << diff << "\n";

            // Recalculate the weights.
            t1 = clock();
            UpdateWeights();
            t2 = clock();
            diff = (float)((t2 - t1) / CLOCKS_PER_SEC);  // clock() returns ticks, not seconds
            cout << "execution time of weight update was " << diff << "\n";

            t1 = omp_get_wtime();
            UpdateGlobalProblem();
            t2 = omp_get_wtime();
            diff = t2-t1;
            cout << "execution time of global problem update was " << diff << "\n";

            ReportWeightStats();
        }
    }
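The loop above mixes omp_get_wtime() (wall-clock seconds) with clock() (CPU ticks) and repeats the same three-line timing pattern for every phase. A small RAII helper, sketched here under the assumption that wall-clock seconds are wanted throughout, collapses each phase to one scoped block:

#include <omp.h>
#include <iostream>
#include <string>

// Prints the elapsed wall-clock time of a scope when the timer is destroyed.
struct ScopedTimer {
  std::string label;
  double start;
  explicit ScopedTimer(std::string l) : label(std::move(l)), start(omp_get_wtime()) {}
  ~ScopedTimer() {
    std::cout << "execution time of " << label << " was "
              << omp_get_wtime() - start << "\n";
  }
};

// Usage inside the iteration loop, e.g.:
// { ScopedTimer timer("slack update"); CalculateSlacks(); UpdateAvgSlacks(t); }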
Example #7
/* UpdateModels: update all models and save them in newDir if set,
   new files have newExt if set */
void UpdateModels(void)
{
   int n;
   HLink hmm;
   HMMScanState hss;
   
   if (trace&T_INT){
      printf("Starting Model Update\n"); fflush(stdout);
   }

   if (hsKind==TIEDHS){
      if (uFlags & UPVARS)  /* TIEDHS therefore only done once per HMMSet */
         UpdateTMVars();
      if (uFlags & UPMEANS) 
         UpdateTMMeans();      
      if (uFlags & (UPMEANS|UPVARS))
         FixAllGConsts(&hset);
   }

   NewHMMScan(&hset,&hss);
   do {
      hmm = hss.hmm;   
      n = (int)hmm->hook;
      if (n<minEgs && !(trace&T_OPT))
         HError(-2428,"%s copied: only %d egs\n",HMMPhysName(&hset,hmm),n);
      if (n>=minEgs) {
         if (uFlags & UPTRANS)
            UpdateTrans(hmm);
         if (maxMixes>1 && uFlags & UPMIXES)
            UpdateWeights(hmm);
      }
      if (trace&T_OPT) {
         if (n<minEgs)
            printf("Model %s copied: only %d examples\n",
                   HMMPhysName(&hset,hmm),n);
         else
            printf("Model %s updated with %d examples\n",
                   HMMPhysName(&hset,hmm),n);
         fflush(stdout);
      }
   } while (GoNextHMM(&hss));
   EndHMMScan(&hss);
   if (trace&T_TOP){
      printf("Saving hmm's to dir %s\n",(newDir==NULL)?"Current":newDir); 
      fflush(stdout);
   }

   if(SaveHMMSet(&hset,newDir,newExt,NULL,saveBinary)<SUCCESS)
      HError(2411,"UpdateModels: SaveHMMSet failed");
   ResetHeaps();                               /* Clean Up */
   if (trace&T_TOP)
      printf("Reestimation complete - average log prob per frame = %e\n",
             totalPr/(double)totalT);
}
Example #8
/**
  * Function that takes two vectors, one for the inputs and one for the outputs, and trains one epoch:
  */
 int Train( PtNet Net, int size, float *entry, int size2, float *out, float l_rate )
 {
    int Ret;

    /* Insert the input values: */
    Ret = SetEntry( Net, size, entry );
    if( Ret == -1 )
       return Ret;
    /* Forward pass: compute the current (still untrained) output values: */
    ActivAll(Net);
    /* Train one epoch: */
    Ret = TrainOneEpoch( Net, out, size2 );
    /* Update the weights: */
    UpdateWeights( Net, l_rate );
    return Ret;
 }
Example #9
 void Instance::RunMultiplicativeWeights(long double num_iterations, long double numerical_accuracy_tolerance) {
     SetBudgets(0.25);
     CalculateInstanceWidth();
     BuildPrimals();
     
     //ComputeCPLEXRevenue();
     
     bool mw_algorithm = false;
     
     if (mw_algorithm) {
         CreateGlobalProblem();
         global_problem_.InitializeBudgetAllocation();

         for (int t = 1; t <= num_iterations; ++t) {
             cout << "Entering iteration " << t << "\n";

             // Get primal solution.
             global_problem_.ConstructPrimal(primal_sol_, t);

             // VerifySolution();

             // Calculate slacks, update averages and recalculate weights.
             cout << "Running MW update \n";
             CalculateSlacks();
             UpdateAvgPrimal(t);
             UpdateAvgSlacks(t);
             ReportWorstInfeasibility(t);
             UpdateWeights();
             UpdateGlobalProblem();
             ReportWeightStats();
         }
     } else {
         NaiveMW naive_mw = NaiveMW(num_impressions_, num_advertisers_, max_bid_, epsilon_, width_, bid_sparsity_,
                                    budgets_);
         naive_mw.RunNaiveMW(&bids_matrix_, primal_sol_, &avg_primal_sol_);
     }
 }
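None of these snippets show UpdateWeights() itself. For reference, the textbook multiplicative-weights rule scales each constraint's weight exponentially in its slack and renormalizes; the sketch below illustrates that canonical update, not the implementation used here, and sign conventions vary by formulation:

#include <cmath>
#include <vector>

// Textbook MW update: weights grow on constraints with negative slack
// (violations) and shrink on satisfied ones, then are renormalized so
// they remain a probability distribution.
void MWUpdate(std::vector<double> &weights,
              const std::vector<double> &slacks, double epsilon) {
  double total = 0.0;
  for (size_t i = 0; i < weights.size(); ++i) {
    weights[i] *= std::exp(-epsilon * slacks[i]);
    total += weights[i];
  }
  for (double &w : weights) w /= total;
}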
Example #10
void Net::Train(const mxArray *mx_data, const mxArray *mx_labels) {  
  
  //mexPrintMsg("Start training...");
  LayerFull *lastlayer = static_cast<LayerFull*>(layers_.back());
  std::vector<size_t> labels_dim = mexGetDimensions(mx_labels);  
  mexAssert(labels_dim.size() == 2, "The label array must have 2 dimensions");    
  mexAssert(labels_dim[0] == lastlayer->length_,
    "Labels and last layer must have equal number of classes");  
  size_t train_num = labels_dim[1];  
  Mat labels(labels_dim);
  mexGetMatrix(mx_labels, labels);
  classcoefs_.assign(labels_dim[0], 1);
  if (params_.balance_) {  
    Mat labels_mean(labels_dim[0], 1);
    labels.Mean(2, labels_mean);
    for (size_t i = 0; i < labels_dim[0]; ++i) {
      mexAssert(labels_mean(i) > 0, "Balancing impossible: one of the classes is not present");
      (classcoefs_[i] /= labels_mean(i)) /= labels_dim[0];      
    }
  }
  if (lastlayer->function_ == "SVM") {
    (labels *= 2) -= 1;    
  }
  
  size_t mapnum = 1;  
  if (mexIsCell(mx_data)) {
    mapnum = mexGetNumel(mx_data);    
  }
  mexAssert(mapnum == layers_.front()->outputmaps_,
    "Data must have the same number of cells as outputmaps on the first layer");
  std::vector< std::vector<Mat> > data(mapnum);  
  for (size_t map = 0; map < mapnum; ++map) {
    const mxArray *mx_cell;  
    if (mexIsCell(mx_data)) {
      mx_cell = mxGetCell(mx_data, map);
    } else {
      mx_cell = mx_data;
    }
    std::vector<size_t> data_dim = mexGetDimensions(mx_cell);  
    mexAssert(data_dim.size() == 3, "The data array must have 3 dimensions");  
    mexAssert(data_dim[0] == layers_.front()->mapsize_[0] && 
              data_dim[1] == layers_.front()->mapsize_[1],
             "Data and the first layer must have equal sizes");    
    mexAssert(data_dim[2] == train_num, "All data maps and labels must have equal number of objects");    
    mexGetMatrix3D(mx_cell, data[map]);
  }

  size_t numbatches = ceil((double) train_num/params_.batchsize_);
  trainerror_.assign(params_.numepochs_ * numbatches, 0);
  for (size_t epoch = 0; epoch < params_.numepochs_; ++epoch) {    
    std::vector<size_t> randind(train_num);
    for (size_t i = 0; i < train_num; ++i) {
      randind[i] = i;
    }
    if (params_.shuffle_) {
      std::random_shuffle(randind.begin(), randind.end());
    }
    std::vector<size_t>::const_iterator iter = randind.begin();
    for (size_t batch = 0; batch < numbatches; ++batch) {
      size_t batchsize = std::min(params_.batchsize_, (size_t)(randind.end() - iter));
      std::vector<size_t> batch_ind = std::vector<size_t>(iter, iter + batchsize);
      iter = iter + batchsize;
      std::vector< std::vector<Mat> > data_batch(mapnum);
      for (size_t map = 0; map < mapnum; ++map) {
        data_batch[map].resize(batchsize);
        for (size_t i = 0; i < batchsize; ++i) {        
          data_batch[map][i] = data[map][batch_ind[i]];
        }
      }      
      Mat labels_batch(labels_dim[0], batchsize);
      Mat pred_batch(labels_dim[0], batchsize);
      labels.SubMat(batch_ind, 2, labels_batch);
      UpdateWeights(false);      
      Forward(data_batch, pred_batch, true);
      Backward(labels_batch, trainerror_[epoch * numbatches + batch]);      
      UpdateWeights(true);
      if (params_.verbose_ == 2) {
        std::string info = std::string("Epoch: ") + std::to_string(epoch+1) +
                           std::string(", batch: ") + std::to_string(batch+1);
        mexPrintMsg(info);
      }
    } // batch    
    if (params_.verbose_ == 1) {
      std::string info = std::string("Epoch: ") + std::to_string(epoch+1);                         
      mexPrintMsg(info);
    }
  } // epoch
  //mexPrintMsg("Training finished");
}
Example #11
void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set_with_bias,
                          double learning_rate,
                          int max_iterations,
                          double min_error_cost) {
  int num_examples = training_sample_set_with_bias.size();
  int num_features = training_sample_set_with_bias[0].GetInputVectorSize();

  //{
  //  int layer_i = -1;
  //  int node_i = -1;
  //  std::cout << "Starting weights:" << std::endl;
  //  for (const auto & layer : m_layers) {
  //    layer_i++;
  //    node_i = -1;
  //    std::cout << "Layer " << layer_i << " :" << std::endl;
  //    for (const auto & node : layer.GetNodes()) {
  //      node_i++;
  //      std::cout << "\tNode " << node_i << " :\t";
  //      for (auto m_weightselement : node.GetWeights()) {
  //        std::cout << m_weightselement << "\t";
  //      }
  //      std::cout << std::endl;
  //    }
  //  }
  //}
  // Print progress roughly 100 times over the run; guard max_iterations < 100,
  // which would otherwise make the modulus operand zero (undefined behaviour).
  const int progress_step = (max_iterations < 100) ? 1 : max_iterations / 100;
  int i = 0;
  double current_iteration_cost_function = 0.0;
  for (i = 0; i < max_iterations; i++) {
    current_iteration_cost_function = 0.0;
    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
      std::vector<double> predicted_output;
      std::vector< std::vector<double> > all_layers_activations;
      GetOutput(training_sample_with_bias.input_vector(),
                &predicted_output,
                &all_layers_activations);
      const std::vector<double> &  correct_output =
        training_sample_with_bias.output_vector();

      assert(correct_output.size() == predicted_output.size());
      std::vector<double> deriv_error_output(predicted_output.size());

      if ((i % progress_step) == 0) {
        std::stringstream temp_training;
        temp_training << training_sample_with_bias << "\t\t";

        temp_training << "Predicted output: [";
        for (size_t k = 0; k < predicted_output.size(); k++) {
          if (k != 0)
            temp_training << ", ";
          temp_training << predicted_output[k];
        }
        temp_training << "]";
        LOG(INFO) << temp_training.str();
      }

      for (size_t j = 0; j < predicted_output.size(); j++) {
        current_iteration_cost_function +=
          (std::pow)((correct_output[j] - predicted_output[j]), 2);
        deriv_error_output[j] =
          -2 * (correct_output[j] - predicted_output[j]);
      }

      UpdateWeights(all_layers_activations,
                    deriv_error_output,
                    learning_rate);
    }

    if ((i % progress_step) == 0)
      LOG(INFO) << "Iteration " << i << " cost function f(error): "
      << current_iteration_cost_function;
    if (current_iteration_cost_function < min_error_cost)
      break;
  }

  LOG(INFO) << "Iteration " << i << " cost function f(error): "
    << current_iteration_cost_function;

  LOG(INFO) << "******************************";
  LOG(INFO) << "******* TRAINING ENDED *******";
  LOG(INFO) << "******* " << i << " iters *******";
  LOG(INFO) << "******************************";
  //{
  //  int layer_i = -1;
  //  int node_i = -1;
  //  std::cout << "Final weights:" << std::endl;
  //  for (const auto & layer : m_layers) {
  //    layer_i++;
  //    node_i = -1;
  //    std::cout << "Layer " << layer_i << " :" << std::endl;
  //    for (const auto & node : layer.GetNodes()) {
  //      node_i++;
  //      std::cout << "\tNode " << node_i << " :\t";
  //      for (auto m_weightselement : node.GetWeights()) {
  //        std::cout << m_weightselement << "\t";
  //      }
  //      std::cout << std::endl;
  //    }
  //  }
  //}
}
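The inner loop accumulates the squared-error cost C = sum_j (y_j - p_j)^2 and feeds its analytic derivative dC/dp_j = -2 (y_j - p_j) into deriv_error_output. A standalone finite-difference sanity check of that derivative (illustrative values only):

#include <cassert>
#include <cmath>

int main() {
  const double y = 0.7, p = 0.3, h = 1e-6;
  auto C = [y](double q) { return (y - q) * (y - q); };
  double numeric  = (C(p + h) - C(p - h)) / (2 * h);  // central difference
  double analytic = -2 * (y - p);                     // matches deriv_error_output[j]
  assert(std::fabs(numeric - analytic) < 1e-6);
  return 0;
}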
Example #12
extern int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *enerd,
                                    t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,
                                    gmx_int64_t step,
                                    rvec *v, t_mdatoms *mdatoms)
/* Note that the state variable is only needed for simulated tempering, not
   Hamiltonian expanded ensemble.  May be able to remove it after integrator refactoring. */
{
    real       *pfep_lamee, *scaled_lamee, *weighted_lamee;
    double     *p_k;
    int         i, nlim, lamnew, totalsamples;
    real        oneovert, maxscaled = 0, maxweighted = 0;
    t_expanded *expand;
    t_simtemp  *simtemp;
    gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;

    expand  = ir->expandedvals;
    simtemp = ir->simtempvals;
    nlim    = ir->fepvals->n_lambda;

    snew(scaled_lamee, nlim);
    snew(weighted_lamee, nlim);
    snew(pfep_lamee, nlim);
    snew(p_k, nlim);

    /* update the count at the current lambda*/
    dfhist->n_at_lam[fep_state]++;

    /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
       pressure controlled.*/
    /*
       pVTerm = 0;
       where does this PV term go?
       for (i=0;i<nlim;i++)
       {
       fep_lamee[i] += pVTerm;
       }
     */

    /* determine the minimum value to avoid overflow.  Probably a better way to do this */
    /* we don't need to include the pressure term, since the volume is the same between the two.
       is there some term we are neglecting, however? */

    if (ir->efep != efepNO)
    {
        for (i = 0; i < nlim; i++)
        {
            if (ir->bSimTemp)
            {
                /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
                    + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[fep_state]))/BOLTZ;
            }
            else
            {
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
                /* mc_temp is currently set to the system reft unless otherwise defined */
            }

            /* save these energies for printing, so they don't get overwritten by the next step */
            /* they aren't overwritten in the non-free energy case, but we always print with these
               for simplicity */
        }
    }
    else
    {
        if (ir->bSimTemp)
        {
            for (i = 0; i < nlim; i++)
            {
                scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[fep_state])/BOLTZ;
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        pfep_lamee[i] = scaled_lamee[i];

        weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
        if (i == 0)
        {
            maxscaled   = scaled_lamee[i];
            maxweighted = weighted_lamee[i];
        }
        else
        {
            if (scaled_lamee[i] > maxscaled)
            {
                maxscaled = scaled_lamee[i];
            }
            if (weighted_lamee[i] > maxweighted)
            {
                maxweighted = weighted_lamee[i];
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        scaled_lamee[i]   -= maxscaled;
        weighted_lamee[i] -= maxweighted;
    }

    /* update weights - we decide whether or not to actually do this inside */

    bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
    if (bDoneEquilibrating)
    {
        if (log)
        {
            fprintf(log, "\nStep %d: Weights have equilibrated, using criteria: %s\n", (int)step, elmceq_names[expand->elmceq]);
        }
    }

    lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
                             ir->expandedvals->lmc_seed, step);
    /* if using simulated tempering, we need to adjust the temperatures */
    if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
    {
        int   i, j, n, d;
        real *buf_ngtc;
        real  told;
        int   nstart, nend, gt;

        snew(buf_ngtc, ir->opts.ngtc);

        for (i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                told              = ir->opts.ref_t[i];
                ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
                buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
            }
        }

        /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */

        nstart = 0;
        nend   = mdatoms->homenr;
        for (n = nstart; n < nend; n++)
        {
            gt = 0;
            if (mdatoms->cTC)
            {
                gt = mdatoms->cTC[n];
            }
            for (d = 0; d < DIM; d++)
            {
                v[n][d] *= buf_ngtc[gt];
            }
        }

        if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
        {
            /* we need to recalculate the masses if the temperature has changed */
            init_npt_masses(ir, state, MassQ, FALSE);
            for (i = 0; i < state->nnhpres; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nhpres_vxi[i+j] *= buf_ngtc[i];
                }
            }
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nosehoover_vxi[i+j] *= buf_ngtc[i];
                }
            }
        }
        sfree(buf_ngtc);
    }

    /* now check on the Wang-Landau updating criteria */

    if (EWL(expand->elamstats))
    {
        bSwitchtoOneOverT = FALSE;
        if (expand->bWLoneovert)
        {
            totalsamples = 0;
            for (i = 0; i < nlim; i++)
            {
                totalsamples += dfhist->n_at_lam[i];
            }
            oneovert = (1.0*nlim)/totalsamples;
            /* oneovert has decreased a bit since last time, so we actually make sure it's within one of this number */
            /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
            if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
                (dfhist->wl_delta < expand->init_wl_delta))
            {
                bSwitchtoOneOverT = TRUE;
            }
        }
        if (bSwitchtoOneOverT)
        {
            dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
        }
        else
        {
            bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
            if (bIfReset)
            {
                for (i = 0; i < nlim; i++)
                {
                    dfhist->wl_histo[i] = 0;
                }
                dfhist->wl_delta *= expand->wl_scale;
                if (log)
                {
                    fprintf(log, "\nStep %d: weights are now:", (int)step);
                    for (i = 0; i < nlim; i++)
                    {
                        fprintf(log, " %.5f", dfhist->sum_weights[i]);
                    }
                    fprintf(log, "\n");
                }
            }
        }
    }
    sfree(pfep_lamee);
    sfree(scaled_lamee);
    sfree(weighted_lamee);
    sfree(p_k);

    return lamnew;
}
Example #13
void Reaching::ReachingStep(float dt){
    if(dt<EPSILON) return; // no point in doing anything
    //  cout<<"DT "<<dt<<endl;
    joint_vec_t tmp1,tmp3; // temporary variables
    cart_vec_t tmp13;
    float dist=0;
  
    // dt = dt/10; 
    //dist = UpdateLocalTarget();
    if(dist>tol || isnan(dist)){
        SetLocalTarget(target);
        cout<<dist<<" "<<tol<<endl;
    }
    // vite in angle space and cart space
    alpha *= dt; beta *= dt;
    ViteAngle(tar_angle,pos_angle,v_angle,des_angle,des_v_angle);
    ViteCart(target,pos_cart,v_cart,des_cart,des_v_cart);
    alpha /= dt; beta /= dt;

    //  cout<<"vite done"<<endl; 
#ifdef OBSTACLE_AVOIDANCE
    // find the closest obstacle
    float ro,point;
    float gamma;
    CMatrix4_t ijac;
    v4_clear(des_a_angle_obs);
    if(env){
        for (i=1;i<3;i++){
            for(int j=0;j<env->CountManipulableObjects();j++){
                LinkDistanceToObject(i,env->GetObject(j),&ro,&point,tmp13);
                //      coutvec(tmp13);
                IntermediateJacobian(i,point,pos_angle,ijac);
                m3_4_t_v_multiply(ijac,tmp13,tmp1);
                // eq. 18 of Khatib,icra'85
                if(ro<=obstacle_rad){
                    gamma = nu *(1/ro -1/obstacle_rad)/(ro*ro); //(ro*ro) 
                    //test
                    //   gamma *= -v4_dot(tmp1,v_angle)/50;
                    //   gamma = max(0.0,gamma);
                }
                else{
                    gamma =0;
                }
                v4_scale(tmp1,gamma,tmp1);
                v4_add(des_a_angle_obs,tmp1,des_a_angle_obs);
            }
        }
        v4_add(des_a_angle_obs,des_v_angle,des_v_angle);
        v4_add(pos_angle,des_v_angle,des_angle);
    }
#endif
 
    body->SetAnglesInRange(des_angle);
    des_v_angle = des_angle - pos_angle;
    des_v_cart = des_cart - pos_cart;
    if(pure_joint_ctl){
        tmp1 = des_angle;
    }
    else{
        UpdateWeights();
        // coherence enforcement
        ProjectVector(des_v_cart,des_v_angle,tmp3);
        tmp1 = pos_angle+tmp3;
    }
    body->SetAnglesInRange(tmp1);
    body->Angle2Cart(tmp1,tmp13);
    v_angle = tmp1 - pos_angle;
    v_cart = tmp13 - pos_cart;
    pos_cart = tmp13;
    pos_angle = tmp1;
    //pos_angle.Print();
    //pos_cart.Print();
}