/**
     * Get the prediction response for the given batch of images.
     * @param originImages images to be classified
     * @param labels       label assigned to each image's Datum (required by the data layer)
     * @param resultLayer  the name of the result (output) layer
     * @param dataLayer    the name of the memory data layer
     * @param predictions  output vector the predicted values are appended to
     */
    void CaffeClassifier::predict(std::vector<cv::Mat> originImages, std::vector<int> labels, string resultLayer,
                                  string dataLayer, vector<short> & predictions) {
        vector<Datum> vecDatum;

        for (int i = 0; i < originImages.size(); i++) {
            cv::Mat originImage = originImages[i];

            // resize image
            Mat image;
            if (originImage.cols != imageSize.width || originImage.rows != imageSize.height) {
                resize(originImage, image, imageSize);
            } else
                image = originImage;

            // check channels
            if (channels != image.channels()) {
                cerr << "Error: the channel number of input image is invalid for CNN classifier!" << endl;
                exit(1);
            }

            // mat to datum
            Datum datum;
            CVMatToDatum(image, &datum);
            datum.set_label(labels[i]);
            vecDatum.push_back(datum);
            image.release();
        }

        // get the data layer
        const caffe::shared_ptr<MemoryDataLayer<float>> memDataLayer = boost::static_pointer_cast<MemoryDataLayer<float>> (caffeNet->layer_by_name(dataLayer));

        // push new image data
        memDataLayer->AddDatumVector(vecDatum);
        //memDataLayer->ExactNumBottomBlobs();

        // do forward pass
        vector<Blob<float>*> inputVec;
        caffeNet->Forward(inputVec);

        // get results
        const caffe::shared_ptr<Blob<float> > featureBlob = caffeNet->blob_by_name(resultLayer);
        int batchSize = featureBlob->num();
        int dimFeatures = featureBlob->count() / batchSize;
//        std::cout << "Batch size is " << batchSize << "/ dim features is " << dimFeatures << std::endl;

        // get output from each channel
        for (int n = 0; n < batchSize; ++n) {
            float* fs = featureBlob->mutable_cpu_data() + featureBlob->offset(n);
            if (dimFeatures > 0) {
                vector<float> feature_vector(fs, fs + dimFeatures);
                // note: the float outputs are narrowed to short when appended
                predictions.insert(predictions.end(), feature_vector.begin(), feature_vector.end());
            }
        }

        // release data
        // for (Datum d : vecDatum) {
        //     d.release_data();
        // }
    }
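A minimal usage sketch for the method above, assuming a CaffeClassifier instance whose network contains a MemoryDataLayer named "data" and an output blob named "prob"; the header, function name, layer/blob names and image path are assumptions for illustration, not part of the original example.

#include "CaffeClassifier.h"   // hypothetical header for the class above
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>

void classifyExample(CaffeClassifier& classifier) {
    // hypothetical input batch of one image
    std::vector<cv::Mat> images;
    images.push_back(cv::imread("sample.png"));

    // the MemoryDataLayer expects one label per Datum, even at inference time
    std::vector<int> labels(images.size(), 0);

    std::vector<short> predictions;
    classifier.predict(images, labels, "prob", "data", predictions);
    // predictions now holds the flattened output blob, one block per image
}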
Example #2
	void
	extract_dense(nv_matrix_t *vlad, int j,
				  const nv_matrix_t *image,
				  nv_keypoint_dense_t *dense,
				  int ndense
		)
	{
		NV_ASSERT(vlad->n == DIM);
		int desc_m;
		nv_matrix_t *key_vec;
		nv_matrix_t *desc_vec;
		nv_matrix_t *resize, *gray, *smooth;
		
		int i;
		int km = 0;

		if (m_fit_area == 0) {
			// scale the longer side down to IMG_SIZE(), preserving the aspect ratio
			float scale = IMG_SIZE() / (float)NV_MAX(image->rows, image->cols);
			resize = nv_matrix3d_alloc(3, (int)(image->rows * scale),
									   (int)(image->cols * scale));
		} else {
			// choose dimensions whose product is roughly m_fit_area
			// while keeping the original rows/cols ratio
			float axis_ratio = (float)image->rows / image->cols;
			int new_cols = (int)sqrtf(m_fit_area / axis_ratio);
			int new_rows = (int)((float)m_fit_area / new_cols);
			resize = nv_matrix3d_alloc(3, new_rows, new_cols);
		}
		gray = nv_matrix3d_alloc(1, resize->rows, resize->cols);
		smooth = nv_matrix3d_alloc(1, resize->rows, resize->cols);
		
		for (i = 0; i < ndense; ++i) {
			km += dense[i].rows * dense[i].cols;
		}
		km *= 2;
		key_vec = nv_matrix_alloc(NV_KEYPOINT_KEYPOINT_N, km);
		desc_vec = nv_matrix_alloc(NV_KEYPOINT_DESC_N, km);
		nv_resize(resize, image);
		nv_gray(gray, resize);
		nv_gaussian5x5(smooth, 0, gray, 0);
		
		nv_matrix_zero(desc_vec);
		nv_matrix_zero(key_vec);
		
		desc_m = nv_keypoint_dense_ex(m_ctx, key_vec, desc_vec, smooth, 0,
									  dense, ndense);
		feature_vector(vlad, j, key_vec, desc_vec, desc_m);
		
		nv_matrix_free(&gray);
		nv_matrix_free(&resize);
		nv_matrix_free(&smooth);
		nv_matrix_free(&key_vec);
		nv_matrix_free(&desc_vec);
	}
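The m_fit_area branch above picks dimensions whose product is roughly the requested area while preserving the image's aspect ratio. A small standalone sketch of that arithmetic, with a made-up source size and fit area:

#include <math.h>
#include <stdio.h>

/* Illustration only: repeats the resize arithmetic from the m_fit_area
 * branch with a hypothetical 640x480 image and a 512*512 target area. */
int main(void)
{
    int rows = 480, cols = 640;
    int fit_area = 512 * 512;
    float axis_ratio = (float)rows / cols;              /* 0.75 */
    int new_cols = (int)sqrtf(fit_area / axis_ratio);   /* ~591 */
    int new_rows = (int)((float)fit_area / new_cols);   /* ~443 */
    /* new_rows * new_cols is close to fit_area and
     * new_rows / new_cols is close to rows / cols */
    printf("%dx%d -> %dx%d\n", cols, rows, new_cols, new_rows);
    return 0;
}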
Example #3
DenseMatrix dense_matrix_from_features(FeaturesCallback features,
                                       IndexType dimension,
                                       RandomAccessIterator begin, 
                                       RandomAccessIterator end)
{
	DenseMatrix matrix(dimension, end-begin);
	DenseVector feature_vector(dimension);

	for (RandomAccessIterator iter=begin; iter!=end; ++iter)
	{
		features.vector(*iter,feature_vector);
		matrix.col(iter-begin).array() = feature_vector;
	}

	return matrix;
}
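A hedged usage sketch for the function above. The typedefs and the FeaturesCallback definition below are stand-ins invented so the snippet is self-contained; in the original code base these types are supplied by the surrounding library, and the function may well be a template over the callback and iterator types.

#include <Eigen/Dense>
#include <vector>

typedef Eigen::MatrixXd DenseMatrix;   // assumed Eigen typedefs
typedef Eigen::VectorXd DenseVector;
typedef int IndexType;
typedef std::vector<IndexType>::const_iterator RandomAccessIterator;

// The callback concept expected above: fills one feature vector per object.
struct FeaturesCallback {
    void vector(IndexType index, DenseVector& out) {
        for (int d = 0; d < out.size(); ++d)
            out(d) = static_cast<double>(index + d);  // placeholder features
    }
};

void usageExample() {
    std::vector<IndexType> indices;
    for (IndexType i = 0; i < 4; ++i)
        indices.push_back(i);

    // Builds a 5 x 4 matrix: one column of features per index.
    DenseMatrix m = dense_matrix_from_features(FeaturesCallback(), 5,
                                               indices.begin(), indices.end());
}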
Example #4
// Helper computes one or more features corresponding to the given points.
// Emitted features are on the line defined by:
// start_pt + lambda * (end_pt - start_pt) for scalar lambda.
// Features are spaced at feature_length intervals.
static int ComputeFeatures(const FCOORD& start_pt, const FCOORD& end_pt,
                           double feature_length,
                           GenericVector<INT_FEATURE_STRUCT>* features) {
  FCOORD feature_vector(end_pt - start_pt);
  if (feature_vector.x() == 0.0f && feature_vector.y() == 0.0f) return 0;
  // Compute theta for the feature based on its direction.
  uint8_t theta = feature_vector.to_direction();
  // Compute the number of features and lambda_step.
  double target_length = feature_vector.length();
  int num_features = IntCastRounded(target_length / feature_length);
  if (num_features == 0) return 0;
  // Divide the length evenly into num_features pieces.
  double lambda_step = 1.0 / num_features;
  double lambda = lambda_step / 2.0;
  for (int f = 0; f < num_features; ++f, lambda += lambda_step) {
    FCOORD feature_pt(start_pt);
    feature_pt += feature_vector * lambda;
    INT_FEATURE_STRUCT feature(feature_pt, theta);
    features->push_back(feature);
  }
  return num_features;
}
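A hedged sketch of calling the helper above with made-up coordinates, just to illustrate how the lambda spacing places features at the midpoints of equal segments; since ComputeFeatures is file-static, such a call would have to live in the same source file.

// Illustration only: coordinates and feature_length are invented.
static void ComputeFeaturesExample() {
  GenericVector<INT_FEATURE_STRUCT> features;
  FCOORD start(0.0f, 0.0f);
  FCOORD end(12.0f, 0.0f);
  // target_length = 12 and feature_length = 4 give num_features = 3,
  // lambda_step = 1/3 and lambda = 1/6, 3/6, 5/6, i.e. features at
  // x = 2, 6 and 10: the midpoints of three equal segments.
  int n = ComputeFeatures(start, end, 4.0, &features);
  // n == 3; all three features share the same theta (the segment's direction).
}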