Code example #1
#include <vector>

#include <opencv2/opencv.hpp>

#include <caffe/blob.hpp>
#include <caffe/util/math_functions.hpp>  // caffe_copy

using namespace cv;
using caffe::Blob;

// Draws the 2D keypoints stored in a Caffe label blob for training example
// "nIter" onto the visualization image "vis", and returns them as a list of
// points. "sf" is a coordinate scale factor assumed to be defined elsewhere
// in the original source; jvl::active_keypoints and jvl::printfpp are
// helpers from the surrounding project.
template <typename Dtype>
static std::vector<Point2d> drawKeypoints(Blob<Dtype>* blob, int nIter, Mat& vis, Scalar color)
{
  // Check geometry: one (u,v) pair per active keypoint, stored as channels.
  CHECK_LT(nIter, blob->num());
  CHECK_EQ(blob->channels(), 2*jvl::active_keypoints.size());
  CHECK_EQ(blob->height(), 1);
  CHECK_EQ(blob->width(), 1);

  // Copy the label data for example "nIter" into an OpenCV matrix.
  const Dtype* label_data = blob->cpu_data() + blob->offset(nIter);
  Mat label_mat(2*(int)jvl::active_keypoints.size(), 1, DataType<Dtype>::type);
  caffe_copy(blob->channels()*blob->height()*blob->width(), label_data, label_mat.ptr<Dtype>());

  std::vector<Point2d> points;
  for(int iter = 0; iter < 2*(int)jvl::active_keypoints.size(); iter += 2)
  {
    Dtype u = sf*label_mat.at<Dtype>(iter);
    Dtype v = sf*label_mat.at<Dtype>(iter+1);
    // "iter" steps over coordinates, so iter/2 is the keypoint index.
    cv::putText(vis, jvl::printfpp("%d", iter/2), Point2d(u, v), FONT_HERSHEY_SIMPLEX, .5, color);
    points.push_back(Point2d(u, v));
  }
  return points;
}
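
A minimal call-site sketch, assuming "net" is a pointer to a test-phase caffe::Net<float> whose ground-truth blob is named "label"; the blob name, input image, and color are illustrative, not part of the original:

// Hypothetical usage: draw the keypoints of the first example in the batch.
cv::Mat vis = input_image.clone();
std::vector<cv::Point2d> points =
    drawKeypoints(net->blob_by_name("label").get(), 0, vis, cv::Scalar(0, 255, 0));
cv::imshow("keypoints", vis);
cv::waitKey(0);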
Code example #2
// Computes the regularized neural network cost for nlopt.
// "opt_param" holds the current optimization parameters supplied by nlopt.
// "grad" is the gradient vector that this function fills in for nlopt.
double NeuralNetwork::ComputeCost(const std::vector<double> &opt_param,
                                  std::vector<double> &grad,
                                  const DataMulti &data_multi) {
    assert(num_layers_ >= 2);

    // Total number of weights in both layer-transition matrices,
    // including the bias terms.
    const int kNumFeatures = hidden_layer_size_*(input_layer_size_+1)+
                             output_layer_size_*(hidden_layer_size_+1);
    assert(kNumFeatures >= 1);
    const int kNumTrainEx = data_multi.num_train_ex();
    assert(kNumTrainEx >= 1);

    // Copies the current parameter values supplied by nlopt.
    arma::vec nlopt_theta(kNumFeatures);
    for(int feature_index=0; feature_index<kNumFeatures; feature_index++)
    {
        nlopt_theta(feature_index) = opt_param[feature_index];
    }

    // Extracts Theta1 and Theta2 from nlopt_theta.
    const arma::vec kNLoptTheta1 =
        nlopt_theta.rows(0,hidden_layer_size_*(input_layer_size_+1)-1);
    const arma::vec kNLoptTheta2 =
        nlopt_theta.rows(hidden_layer_size_*(input_layer_size_+1),
                         hidden_layer_size_*(input_layer_size_+1)+
                         output_layer_size_*(hidden_layer_size_+1)-1);

    // Reshapes Theta1 and Theta2 as matrices.
    const arma::mat kNLoptTheta1Mat =
        arma::reshape(kNLoptTheta1,hidden_layer_size_,input_layer_size_+1);
    const arma::mat kNLoptTheta2Mat =
        arma::reshape(kNLoptTheta2,output_layer_size_,hidden_layer_size_+1);
    std::vector<arma::mat> current_theta;
    current_theta.push_back(kNLoptTheta1Mat);
    current_theta.push_back(kNLoptTheta2Mat);
    set_theta(current_theta);

    // Runs feedforward propagation: each layer applies the sigmoid to an
    // affine transform of the previous layer's bias-augmented activations.
    arma::mat layer_activation_in = data_multi.training_features();
    arma::mat sigmoid_arg = layer_activation_in*theta_.at(0).t();
    arma::mat layer_activation_out = ComputeSigmoid(sigmoid_arg);
    for(int layer_index=1; layer_index<num_layers_; layer_index++)
    {
        const arma::mat kOnesMat = arma::ones(layer_activation_out.n_rows,1);
        layer_activation_in = arma::join_horiz(kOnesMat,layer_activation_out);
        sigmoid_arg = layer_activation_in*theta_.at(layer_index).t();
        layer_activation_out = ComputeSigmoid(sigmoid_arg);
    }

    // Sets up the matrix of (multi-class) training labels: row i is a
    // one-hot encoding of the (1-based) label of training example i.
    arma::mat label_mat = arma::zeros<arma::mat>(kNumTrainEx,output_layer_size_);
    for(int example_index=0; example_index<kNumTrainEx; example_index++)
    {
        const int col_index =
            (int)arma::as_scalar(data_multi.training_labels()(example_index));
        label_mat(example_index,col_index-1) = 1.0;
    }

    // Computes the (unregularized) cross-entropy cost.
    const arma::mat kCostFuncVal =
        (-1)*(label_mat%arma::log(layer_activation_out)+
              (1-label_mat)%arma::log(1-layer_activation_out));
    const double kJTheta = accu(kCostFuncVal)/kNumTrainEx;

    // Adds the regularization term: the sum of squares of all weights
    // except those multiplying the bias units (row 0 after transposition).
    const arma::mat kNLoptTheta1MatSquared = kNLoptTheta1Mat%kNLoptTheta1Mat;
    const arma::mat kNLoptTheta2MatSquared = kNLoptTheta2Mat%kNLoptTheta2Mat;
    const arma::mat kNLoptTheta1MatSquaredTrans = kNLoptTheta1MatSquared.t();
    const arma::mat kNLoptTheta2MatSquaredTrans = kNLoptTheta2MatSquared.t();
    const double kRegTerm =
        (lambda()/(2*kNumTrainEx))*(accu(kNLoptTheta1MatSquaredTrans)+
                                    accu(kNLoptTheta2MatSquaredTrans)-
                                    accu(kNLoptTheta1MatSquaredTrans.row(0))-
                                    accu(kNLoptTheta2MatSquaredTrans.row(0)));
    const double kJThetaReg = kJTheta+kRegTerm;

    // Updates "grad" for nlopt. Gradient-free nlopt algorithms pass an
    // empty vector here, in which case no gradient is required.
    if (!grad.empty()) {
        const int kReturnCode = this->ComputeGradient(data_multi);
        const arma::vec kCurrentGradient = gradient();
        for(int feature_index=0; feature_index<kNumFeatures; feature_index++)
        {
            grad[feature_index] = kCurrentGradient(feature_index);
        }
    }

    printf("kJThetaReg = %.6f\n",kJThetaReg);

    return kJThetaReg;
}
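
For reference, kJThetaReg implements the standard regularized cross-entropy cost, with m = kNumTrainEx training examples, K = output_layer_size_ classes, h_Theta(x) the feedforward output computed above, and the j = 0 bias columns excluded from the regularization sum:

J(\Theta) = -\frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[y_k^{(i)}\log\big(h_\Theta(x^{(i)})\big)_k + \big(1-y_k^{(i)}\big)\log\big(1-h_\Theta(x^{(i)})\big)_k\Big] + \frac{\lambda}{2m}\sum_{l=1}^{2}\sum_{i}\sum_{j\ge 1}\big(\Theta^{(l)}_{i,j}\big)^2

nlopt invokes its objective as a free function with a void* user-data slot, so a member function like ComputeCost is typically reached through a small wrapper. A minimal sketch of that wiring, assuming the NeuralNetwork and DataMulti types from the example above; CostData, ObjectiveWrapper, and RunTraining are hypothetical names, not part of the original:

#include <nlopt.hpp>

// Hypothetical bundle handed to nlopt through its void* user-data pointer.
struct CostData {
    NeuralNetwork* network;
    const DataMulti* data;
};

// Free-function objective with the signature nlopt expects.
double ObjectiveWrapper(const std::vector<double> &opt_param,
                        std::vector<double> &grad, void *user_data) {
    CostData *cost_data = static_cast<CostData*>(user_data);
    return cost_data->network->ComputeCost(opt_param, grad, *cost_data->data);
}

// Wiring it up; num_params corresponds to kNumFeatures above.
void RunTraining(NeuralNetwork &network, const DataMulti &data_multi, int num_params) {
    nlopt::opt opt(nlopt::LD_LBFGS, num_params);
    CostData cost_data{&network, &data_multi};
    opt.set_min_objective(ObjectiveWrapper, &cost_data);
    opt.set_ftol_abs(1e-6);
    std::vector<double> theta(num_params, 0.1);
    double min_cost = 0.0;
    opt.optimize(theta, min_cost);  // on return, theta holds the optimized weights
}

Because LD_LBFGS is a gradient-based algorithm, nlopt passes a non-empty "grad" on every call, which is why ComputeCost fills it in whenever it is non-empty.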