// Arguments "opt_param" and "grad" are updated by nlopt.
// "opt_param" corresponds to the optimization parameters.
// "grad" corresponds to the gradient.
double CollaborativeFiltering::ComputeCost(
  const std::vector<double> &opt_param, std::vector<double> &grad,
  const DataUnlabeled &ratings_data, const DataUnlabeled &indicator_data,
  int num_users, int num_movies, int num_features) {
  assert(num_users >= 1);
  assert(num_movies >= 1);
  assert(num_features >= 1);
  const int kTotalNumFeatures = num_features*(num_users+num_movies);
  arma::vec nlopt_theta = arma::zeros<arma::vec>(kTotalNumFeatures);
  arma::mat features_cvec = arma::zeros<arma::mat>(num_movies*num_features,1);
  arma::mat params_cvec = arma::zeros<arma::mat>(num_users*num_features,1);

  // Uses current value of "theta" from nlopt.
  for (int feature_index = 0; feature_index < kTotalNumFeatures; feature_index++) {
    nlopt_theta(feature_index) = opt_param[feature_index];
    if (feature_index < (num_users*num_features)) {
      params_cvec(feature_index) = opt_param[feature_index];
    }
    else {
      features_cvec(feature_index-(num_users*num_features)) =
        opt_param[feature_index];
    }
  }
  set_theta(nlopt_theta);
  arma::mat features_cvec_squared = arma::square(features_cvec);
  arma::mat params_cvec_squared = arma::square(params_cvec);
  arma::mat params_mat = arma::reshape(params_cvec,num_users,num_features);
  arma::mat features_mat =
    arma::reshape(features_cvec,num_movies,num_features);

  // Computes the cost function given the current value of "theta".
  // kYMat holds the actual rating wherever one exists (unrated entries of
  // the ratings matrix are assumed to be zero) and the current prediction
  // elsewhere, so the residual below is nonzero only for rated pairs.
  const arma::mat kSubsetIndicatorMat =
    indicator_data.training_features().submat(0,0,num_movies-1,num_users-1);
  const arma::mat kSubsetRatingsMat =
    ratings_data.training_features().submat(0,0,num_movies-1,num_users-1);
  const arma::mat kYMat =
    (arma::ones<arma::mat>(num_users,num_movies)-kSubsetIndicatorMat.t()) %
    (params_mat*features_mat.t())+kSubsetRatingsMat.t();
  const arma::mat kCostFunctionMat = params_mat*features_mat.t()-kYMat;
  const arma::vec kCostFunctionVec = arma::vectorise(kCostFunctionMat);
  const double kJTheta = 0.5*arma::as_scalar(sum(square(kCostFunctionVec)));

  // Adds regularization term.
  const double kRegTerm = 0.5*lambda_*
    (as_scalar(sum(params_cvec_squared))+
     as_scalar(sum(features_cvec_squared)));
  const double kJThetaReg = kJTheta+kRegTerm;

  // Updates "grad" for nlopt.
  const int kReturnCode = this->ComputeGradient(ratings_data,indicator_data,
    num_users,num_movies,num_features);
  const arma::vec kCurrentGradient = gradient();
  for (int feature_index = 0; feature_index < kTotalNumFeatures; feature_index++) {
    grad[feature_index] = kCurrentGradient(feature_index);
  }

  return kJThetaReg;
}
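// A minimal sketch of how a callback with the signature above can be wired
// into nlopt's C++ API (via <nlopt.hpp>). The wrapper, the CostArgs bundle,
// and the variable names are illustrative assumptions; only the nlopt calls
// themselves belong to the real API.
struct CostArgs {  // hypothetical payload for nlopt's "void *" parameter
  CollaborativeFiltering *cf;
  const DataUnlabeled *ratings;
  const DataUnlabeled *indicators;
  int num_users, num_movies, num_features;
};

double ComputeCostWrapper(const std::vector<double> &opt_param,
                          std::vector<double> &grad, void *payload) {
  CostArgs *args = static_cast<CostArgs*>(payload);
  return args->cf->ComputeCost(opt_param, grad, *args->ratings,
    *args->indicators, args->num_users, args->num_movies,
    args->num_features);
}

// Usage sketch:
//   CostArgs args = {&cf, &ratings_data, &indicator_data,
//                    num_users, num_movies, num_features};
//   nlopt::opt opt(nlopt::LD_LBFGS, kTotalNumFeatures);
//   opt.set_min_objective(ComputeCostWrapper, &args);
//   std::vector<double> x(kTotalNumFeatures, 0.0);
//   double min_cost = 0.0;
//   opt.optimize(x, min_cost);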
Example 2
// The step size should be chosen carefully to guarantee convergence within a
// reasonable number of iterations.
int GradientDescent::RunGradientDescent(const Data &data) {
  assert(num_iters_ >= 1);
  const int kNumTrainEx = data.num_train_ex();
  assert(kNumTrainEx >= 1);
  const arma::mat kTrainingFeatures = data.training_features();
  const arma::vec kTrainingLabels = data.training_labels();

  std::vector<double> j_theta_array(num_iters_);

  // Recall that we are trying to minimize the following cost function:
  //   ((training features) * (current weights) - (training labels))^2
  // Each iteration of this algorithm updates the weights by subtracting a
  // scaled version of the gradient of this cost function, taken with
  // respect to the weights:
  //   (update) = (step size) * (training features)^T *
  //     ((training features) * (current weights) - (training labels))
  //   (new weights) = (current weights) - (update)
  for (int theta_index = 0; theta_index < num_iters_; theta_index++) {
    const arma::vec kDiffVec = kTrainingFeatures*theta_-kTrainingLabels;
    // Note: join_rows over two columns assumes exactly two features.
    const arma::mat kDiffVecTimesTrainFeat =
      join_rows(kDiffVec % kTrainingFeatures.col(0),
                kDiffVec % kTrainingFeatures.col(1));
    const arma::vec kThetaNew = theta_-alpha_*(1/(double)kNumTrainEx)*
      (sum(kDiffVecTimesTrainFeat)).t();

    // Records the cost at the current weights, then applies the update.
    j_theta_array[theta_index] = ComputeCost(data);
    set_theta(kThetaNew);
  }

  return 0;
}
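// The join_rows trick above hard-codes two feature columns. A minimal
// sketch of the same update for an arbitrary number of features, assuming
// the usual Armadillo types (this helper is not part of the original
// class):
arma::vec GradientDescentStep(const arma::mat &kTrainingFeatures,
                              const arma::vec &kTrainingLabels,
                              const arma::vec &theta, double alpha) {
  const double kNumTrainEx = kTrainingFeatures.n_rows;
  // gradient = (1/m) * X^T * (X*theta - y), for any number of columns of X.
  const arma::vec kGradient =
    kTrainingFeatures.t()*(kTrainingFeatures*theta-kTrainingLabels)/
    kNumTrainEx;
  return theta-alpha*kGradient;
}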
Example 3
/*! 
  Initialize the image point visual feature with polar coordinates.

  \param rho, theta : Polar coordinates \f$(\rho,\theta)\f$ of
  the image point.

  \param Z_ : 3D depth of the point in the camera frame.
  
  \sa set_rho(), set_theta(), set_Z()
*/
void
vpFeaturePointPolar::set_rhoThetaZ(const double rho,
                                   const double theta,
                                   const double Z_)
{
  set_rho(rho);
  set_theta(theta);
  set_Z(Z_);

  // Marks every feature parameter as initialized.
  for(unsigned int i = 0; i < nbParameters; i++) flags[i] = true;
}
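// A short usage sketch for the setter above. The header path follows older
// ViSP conventions and may differ across versions; the numeric values are
// arbitrary illustrative choices.
#include <visp/vpFeaturePointPolar.h>
#include <cmath>

int main() {
  vpFeaturePointPolar s;
  // rho = 0.5, theta = pi/4 rad, depth Z = 1 m.
  s.set_rhoThetaZ(0.5, M_PI/4.0, 1.0);
  return 0;
}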
Example 4
bool Viterbi::open(const Param &param, Tokenizer *t, Connector *c) {
  tokenizer_ = t;
  connector_ = c;

  begin_node_list_.reserve(MIN_INPUT_BUFFER_SIZE);
  end_node_list_.reserve(MIN_INPUT_BUFFER_SIZE);
  path_freelist_.reset(0);

  copy_sentence_ = param.get<bool>("allocate-sentence");
  cost_factor_ = param.get<int>("cost-factor");
  CHECK_FALSE(cost_factor_ > 0) << "cost-factor is empty";

  set_theta(param.get<double>("theta"));
  set_lattice_level(param.get<int>("lattice-level"));
  set_partial(param.get<bool>("partial"));
  set_all_morphs(param.get<bool>("all-morphs"));

  return true;
}
// Arguments "opt_param" and "grad" are updated by nlopt.
// "opt_param" corresponds to the optimization parameters.
// "grad" corresponds to the gradient.
double NeuralNetwork::ComputeCost(const std::vector<double> &opt_param,
                                  std::vector<double> &grad,
                                  const DataMulti &data_multi) {
    assert(num_layers_ >= 2);
    const int kNumFeatures = hidden_layer_size_*(input_layer_size_+1)+
                             output_layer_size_*(hidden_layer_size_+1);
    assert(kNumFeatures >= 1);
    const int kNumTrainEx = data_multi.num_train_ex();
    assert(kNumTrainEx >= 1);
    arma::vec nlopt_theta = arma::zeros<arma::vec>(kNumFeatures);

    // Uses current value of "theta" from nlopt.
    for(int feature_index=0; feature_index<kNumFeatures; feature_index++)
    {
        nlopt_theta(feature_index) = opt_param[feature_index];
    }

    // Extracts Theta1 and Theta2 from nlopt_theta.
    const arma::vec kNLoptTheta1 =
        nlopt_theta.rows(0,hidden_layer_size_*(input_layer_size_+1)-1);
    const arma::vec kNLoptTheta2 =
        nlopt_theta.rows(hidden_layer_size_*(input_layer_size_+1),
                         hidden_layer_size_*(input_layer_size_+1)+
                         output_layer_size_*(hidden_layer_size_+1)-1);

    // Reshapes Theta1 and Theta2 as matrices.
    const arma::mat kNLoptTheta1Mat =
        arma::reshape(kNLoptTheta1,hidden_layer_size_,input_layer_size_+1);
    const arma::mat kNLoptTheta2Mat =
        arma::reshape(kNLoptTheta2,output_layer_size_,hidden_layer_size_+1);
    std::vector<arma::mat> current_theta;
    current_theta.push_back(kNLoptTheta1Mat);
    current_theta.push_back(kNLoptTheta2Mat);
    set_theta(current_theta);

    // Runs feedforward propagation.
    arma::mat layer_activation_in = data_multi.training_features();
    arma::mat sigmoid_arg = layer_activation_in*theta_.at(0).t();
    arma::mat layer_activation_out = ComputeSigmoid(sigmoid_arg);
    for(int layer_index=1; layer_index<num_layers_; layer_index++)
    {
        const arma::mat kOnesMat = arma::ones(layer_activation_out.n_rows,1);
        layer_activation_in = arma::join_horiz(kOnesMat,layer_activation_out);
        sigmoid_arg = layer_activation_in*theta_.at(layer_index).t();
        layer_activation_out = ComputeSigmoid(sigmoid_arg);
    }

    // Sets up matrix of (multi-class) training labels.
    arma::mat label_mat = arma::zeros<arma::mat>(kNumTrainEx,output_layer_size_);
    for(int example_index=0; example_index<kNumTrainEx; example_index++)
    {
        // Training labels are assumed to be 1-indexed.
        int col_index =
            (int)arma::as_scalar(data_multi.training_labels()(example_index));
        label_mat(example_index,col_index-1) = 1.0;
    }

    // Computes regularized neural network cost.
    const arma::mat kCostFuncVal =
        (-1)*(label_mat%arma::log(layer_activation_out)+
              (1-label_mat)%arma::log(1-layer_activation_out));
    const double kJTheta = accu(kCostFuncVal)/kNumTrainEx;
    const arma::mat kNLoptTheta1MatSquared = kNLoptTheta1Mat%kNLoptTheta1Mat;
    const arma::mat kNLoptTheta2MatSquared = kNLoptTheta2Mat%kNLoptTheta2Mat;
    const arma::mat kNLoptTheta1MatSquaredTrans = kNLoptTheta1MatSquared.t();
    const arma::mat kNLoptTheta2MatSquaredTrans = kNLoptTheta2MatSquared.t();
    // The first row of each transposed matrix holds the bias weights, which
    // are excluded from regularization.
    const double kRegTerm =
        (lambda()/(2*kNumTrainEx))*(accu(kNLoptTheta1MatSquaredTrans)+
                                    accu(kNLoptTheta2MatSquaredTrans)-
                                    accu(kNLoptTheta1MatSquaredTrans.row(0))-
                                    accu(kNLoptTheta2MatSquaredTrans.row(0)));
    const double kJThetaReg = kJTheta+kRegTerm;

    // Updates "grad" for nlopt.
    const int kReturnCode = this->ComputeGradient(data_multi);
    const arma::vec kCurrentGradient = gradient();
    for(int feature_index=0; feature_index<kNumFeatures; feature_index++)
    {
        grad[feature_index] = kCurrentGradient(feature_index);
    }

    printf("kJThetaReg = %.6f\n",kJThetaReg);

    return kJThetaReg;
}
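// ComputeSigmoid is called above but not shown. A minimal sketch of what it
// presumably computes (the element-wise logistic function), written here as
// a free-standing Armadillo helper; the real member function may differ.
arma::mat ComputeSigmoid(const arma::mat &sigmoid_arg) {
  // Element-wise 1/(1+e^(-z)); "/" is element-wise division in Armadillo.
  return arma::ones<arma::mat>(arma::size(sigmoid_arg))/
         (1.0+arma::exp(-sigmoid_arg));
}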
Example 6
extern void dispatcher(Queue* in, Queue* out, pthread_mutex_t *mutex){
  pthread_mutex_lock(mutex);
  if (!is_empty(in)){
    uint8_t packet_length = queue_head(in);
    // Only dispatch once the whole packet is available in the queue.
    if (in->count >= packet_length){
      uint8_t count = dequeue(in);  // consumes the length byte
      t_command command = dequeue(in);
      printf("%s received packet with command %i\n", KNRM, command);

      switch (command){
        case INIT:
          init(in);
          acknowledge(out);
          break;
        case SET_DIAM_WHEELS:
          set_diam_wheels(in);
          acknowledge(out);
          break;
        case SET_WHEELS_SPACING:
          set_wheels_spacing(in);
          acknowledge(out);
          break;
        case SET_PID_TRSL:
          set_pid_trsl(in);
          acknowledge(out);
          break;
        case SET_PID_ROT:
          set_pid_rot(in);
          acknowledge(out);
          break;
        case SET_TRSL_ACC:
          set_trsl_acc(in);
          acknowledge(out);
          break;
        case SET_TRSL_DEC:
          set_trsl_dec(in);
          acknowledge(out);
          break;
        case SET_TRSL_MAXSPEED:
          set_trsl_max(in);
          acknowledge(out);
          break;
        case SET_ROT_ACC:
          set_rot_acc(in);
          acknowledge(out);
          break;
        case SET_ROT_DEC:
          set_rot_dec(in);
          acknowledge(out);
          break;
        case SET_ROT_MAXSPEED:
          set_rot_max(in);
          acknowledge(out);
          break;
        case SET_DELTA_MAX_ROT:
          set_delta_max_rot(in);
          acknowledge(out);
          break;
        case SET_DELTA_MAX_TRSL:
          set_delta_max_trsl(in);
          acknowledge(out);
          break;
        case SET_PWM:
          set_pwm(in);
          acknowledge(out);
          break;
        case SET_X:
          set_x(in);
          acknowledge(out);
          break;
        case SET_Y:
          set_y(in);
          acknowledge(out);
          break;
        case SET_THETA:
          set_theta(in);
          acknowledge(out);
          break;
        case TELEMETRY:
          set_telemetry(in);
          acknowledge(out);
          break;
        case SET_DEBUG:
          set_debug(in);
          acknowledge(out);
          break;
        case GOTO_XY:
          goto_xy(in);
          acknowledge(out);
          break;
        case GOTO_THETA:
          goto_theta(in);
          acknowledge(out);
          break;
        case FREE:
          execute_free(in);
          acknowledge(out);
          break;
        case STOP_ASAP:
          stop_asap(in);
          acknowledge(out);
          break;
        case MOVE_TRSL:
          move_trsl(in);
          acknowledge(out);
          break;
        case MOVE_ROT:
          move_rot(in);
          acknowledge(out);
          break;
        case RECALAGE:
          execute_recalage(in);
          acknowledge(out);
          break;
        case CURVE:
          curve(in);
          acknowledge(out);
          break;
        case GET_POSITION:
          getPosition(out);
          break;
        case GET_PID_TRSL:
          getPIDtrsl(out);
          break;
        case GET_PID_ROT:
          getPIDRot(out);
          break;
        case GET_SPEEDS:
          getSpeed(out);
          break;
        case GET_WHEELS:
          getWheels(out);
          break;
        case GET_DELTA_MAX:
          getDeltaMax(out);
          break;
        case GET_VECTOR_TRSL:
          getVectorTrsl(out);
          break;
        case GET_VECTOR_ROT:
          getVectorRot(out);
          break;
        default:
          printf("/!\\ command error: %i\n", command);
          send_error(out, COULD_NOT_READ);
          break;
      }
    }
  }
  pthread_mutex_unlock(mutex);
}
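// A minimal sketch of the framing the dispatcher above expects: a packet is
// a length byte (apparently counting itself and the command byte) followed
// by the command and its payload. The enqueue() signature and this helper's
// name are assumptions inferred from the dequeue()/queue_head() calls above.
void send_packet(Queue *q, pthread_mutex_t *mutex, t_command command,
                 const uint8_t *payload, uint8_t payload_len){
  pthread_mutex_lock(mutex);
  enqueue(q, (uint8_t)(payload_len+2));  // total packet length
  enqueue(q, (uint8_t)command);          // command byte
  for (uint8_t i = 0; i < payload_len; i++)
    enqueue(q, payload[i]);              // command-specific payload
  pthread_mutex_unlock(mutex);
}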