Example #1
// Usage: caffe_('solver_teststep_multigpu')
static void solver_teststep_multigpu(MEX_ARGS) {
	mxCHECK(nrhs == 0,
		"Usage: caffe_('solver_teststep_multigpu')");
	
	vector<shared_ptr<P2PSync<float>>>* sync_vec = sync_ptr->get_syncs();

	// Remember the current device so it can be restored after each forward pass.
	int initial_device;
	CUDA_CHECK(cudaGetDevice(&initial_device));
	for (int i = 0; i < static_cast<int>(sync_vec->size()); i++) {
		// For i == 0 the root solver is taken from sync_ptr itself; the other
		// entries of the sync vector hold the per-GPU worker solvers.
		Solver<float>* solver;
		if (i == 0) {
			solver = sync_ptr->solver().get();
		} else {
			solver = (*sync_vec)[i]->solver().get();
		}

		// Switch to the solver's GPU, run one prefilled forward pass of its net,
		// then switch back to the original device.
		Net<float>* net = solver->net().get();
		CUDA_CHECK(cudaSetDevice(solver->param().device_id()));
		net->ForwardPrefilled();
		CUDA_CHECK(cudaSetDevice(initial_device));
	}
}
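
This handler walks the P2PSync list, switches to each solver's GPU, runs one prefilled forward pass of that solver's net, and restores the original device (sync_ptr and get_syncs() come from a multi-GPU-enabled matcaffe build, not stock Caffe). As a minimal sketch, assuming such a command is registered the way stock matcaffe registers its handlers in matlab/+caffe/private/caffe_.cpp, the dispatch entry would look roughly like this (the neighboring entries are illustrative):

// Sketch of the matcaffe command table; only the added entry is specific to
// this example, the rest follows the stock caffe_.cpp pattern.
struct handler_registry {
  string cmd;
  void (*func)(MEX_ARGS);
};

static handler_registry handlers[] = {
  // Public API commands (only one shown for illustration).
  { "get_solver",               get_solver               },
  { "solver_teststep_multigpu", solver_teststep_multigpu },  // the handler above
  // The "END" sentinel terminates the table; caffe_('cmd', ...) dispatches by name.
  { "END",                      NULL                     },
};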
Example #2
// Push imgs through caffe_test_net in chunks of BATCH_SIZE and append the
// activations of blob LAYER (one feature vector per image) to output.
// Assumes the net's first layer is a MemoryDataLayer with batch size BATCH_SIZE.
void computeFeatures(Net<Dtype>& caffe_test_net,
    const vector<Mat>& imgs,
    string LAYER,
    int BATCH_SIZE,
    vector<vector<Dtype>>& output) {
  int nImgs = imgs.size();
  int nBatches = ceil(nImgs * 1.0f / BATCH_SIZE);
  for (int batch = 0; batch < nBatches; batch++) {
    int actBatchSize = min(nImgs - batch * BATCH_SIZE, BATCH_SIZE);
    vector<Mat> imgs_b;
    if (actBatchSize >= BATCH_SIZE) {
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE,
          imgs.begin() + (batch + 1) * BATCH_SIZE);
    } else {
      // Last, partial batch: pad with copies of the first image so the memory
      // data layer always receives exactly BATCH_SIZE images.
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE, imgs.end());
      for (int j = actBatchSize; j < BATCH_SIZE; j++)
        imgs_b.push_back(imgs[0]);
    }
    // Feed the batch (with dummy labels) into the MemoryDataLayer, then run a
    // forward pass over the prefilled data.
    vector<int> dvl(BATCH_SIZE, 0);
    boost::dynamic_pointer_cast<caffe::MemoryDataLayer<Dtype>>(
        caffe_test_net.layers()[0])->AddMatVector(imgs_b, dvl);
    Dtype loss = 0.0f;
    caffe_test_net.ForwardPrefilled(&loss);
    // Copy out one feature vector per real (non-padding) image in the batch;
    // count() / num() is the feature dimensionality per image.
    const boost::shared_ptr<Blob<Dtype>> feat = caffe_test_net.blob_by_name(LAYER);
    for (int i = 0; i < actBatchSize; i++) {
      const Dtype* feat_data = feat->cpu_data() + feat->offset(i);
      output.push_back(vector<Dtype>(feat_data, feat_data + feat->count() / feat->num()));
    }
    LOG(INFO) << "Batch " << batch << "/" << nBatches << " (" << actBatchSize << " images) done";
  }
}
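
A minimal calling sketch for computeFeatures, assuming Dtype is float, the prototxt's first layer is a MemoryDataLayer whose batch_size equals the BATCH_SIZE argument, and the file, blob, and image names below are placeholders:

// Illustrative caller; all names are placeholders.
caffe::Net<float> net("deploy_memory_data.prototxt", caffe::TEST);
net.CopyTrainedLayersFrom("weights.caffemodel");

std::vector<cv::Mat> imgs;
imgs.push_back(cv::imread("img1.jpg"));
imgs.push_back(cv::imread("img2.jpg"));

std::vector<std::vector<float> > feats;
computeFeatures(net, imgs, "fc7", 10, feats);
// feats[i] now holds the "fc7" activations for imgs[i].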
Example #3
// Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)
static void net_forward(MEX_ARGS) {
	mxCHECK(nrhs >= 1 && nrhs <= 3 && mxIsStruct(prhs[0]),
		"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
	Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
	if (nrhs == 1) {
		// No layer range given: run the whole net on its prefilled input.
		net->ForwardPrefilled();
	} else if (nrhs == 2) {
		// Forward from the given layer index to the end of the net.
		mxCHECK(mxIsDouble(prhs[1]),
			"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
		net->ForwardFrom((int)mxGetScalar(prhs[1]));
	} else {  // nrhs == 3
		// Forward only the layers in the given (inclusive) index range.
		mxCHECK(mxIsDouble(prhs[1]) && mxIsDouble(prhs[2]),
			"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
		net->ForwardFromTo((int)mxGetScalar(prhs[1]), (int)mxGetScalar(prhs[2]));
	}
}
Example #4
// Usage: caffe_('net_forward', hNet)
static void net_forward(MEX_ARGS) {
	mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
		"Usage: caffe_('net_forward', hNet)");
	Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
	net->ForwardPrefilled();
}
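
All four examples route through Net::ForwardPrefilled(), which runs the forward pass over whatever data is already sitting in the net's input blobs. A rough stand-alone sketch of that pattern, assuming a deploy-style prototxt that declares an input blob and a Caffe build whose Net constructor takes a prototxt path and a phase; the file names are placeholders:

// Sketch: fill the input blob by hand, then forward the prefilled net.
caffe::Net<float> net("deploy.prototxt", caffe::TEST);
net.CopyTrainedLayersFrom("weights.caffemodel");

caffe::Blob<float>* input = net.input_blobs()[0];
// ... copy preprocessed image data into input->mutable_cpu_data() here ...

float loss = 0.0f;
const std::vector<caffe::Blob<float>*>& out = net.ForwardPrefilled(&loss);
// out holds the net's output blobs, e.g. out[0]->cpu_data().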