Example #1
static void net_get_input_arrays(MEX_ARGS) {
  mxCHECK(nrhs == 2 && mxIsStruct(prhs[0]),
      "Usage: caffe_('net_get_input_arrays', hNet, memory_data_layer_idx)");

  int layer_idx = *((int*)mxGetData(prhs[1]));

  Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);

  shared_ptr<MemoryDataLayer<float> > md_layer =
    boost::dynamic_pointer_cast< MemoryDataLayer<float> >(net->layers()[layer_idx]);
  if (!md_layer) {
    mxERROR("get_input_arrays may only be called if the layer is a MemoryDataLayer");
  }

  // Blob shape in Caffe order: (num, channels, height, width).
  // mxCreateNumericArray expects mwSize dimensions, not int.
  mwSize dims[4];
  dims[0] = md_layer->batch_size();
  dims[1] = md_layer->channels();
  dims[2] = md_layer->height();
  dims[3] = md_layer->width();
  int md_layer_count = static_cast<int>(dims[0] * dims[1] * dims[2] * dims[3]);
  const float* md_layer_data = md_layer->get_data_ptr();

  mexPrintf("Got layer dims\n");

  // Note: the copy below preserves Caffe's row-major element order, while
  // MATLAB arrays are column-major; callers must account for the layout.
  mxArray* mx_mat = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL);

  float* mat_mem_ptr = reinterpret_cast<float*>(mxGetData(mx_mat));

  caffe_copy(md_layer_count, md_layer_data, mat_mem_ptr);
  plhs[0] = mx_mat;
}
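The cast-and-check pattern above can also be exercised outside the MEX plumbing. The sketch below uses only stock Caffe API (Net::layers() and boost::dynamic_pointer_cast); find_memory_data_layer() is a hypothetical helper name, and the memory_data_layer.hpp include path varies between Caffe versions.

// A minimal sketch, assuming stock Caffe; find_memory_data_layer() is a
// hypothetical helper, not part of the examples on this page.
#include <boost/shared_ptr.hpp>
#include "caffe/net.hpp"
#include "caffe/layers/memory_data_layer.hpp"

using boost::shared_ptr;

// Return the first MemoryDataLayer in the net, or an empty pointer if none.
shared_ptr<caffe::MemoryDataLayer<float> >
find_memory_data_layer(caffe::Net<float>* net) {
  for (size_t i = 0; i < net->layers().size(); ++i) {
    shared_ptr<caffe::MemoryDataLayer<float> > md =
        boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float> >(
            net->layers()[i]);
    if (md) {
      return md;  // the dynamic cast succeeds only for a MemoryDataLayer
    }
  }
  return shared_ptr<caffe::MemoryDataLayer<float> >();
}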
Example #2
template <typename Dtype>
void computeFeatures(Net<Dtype>& caffe_test_net,
    const vector<Mat>& imgs,
    string LAYER,
    int BATCH_SIZE,
    vector<vector<Dtype>>& output) {
  int nImgs = imgs.size();
  int nBatches = ceil(nImgs * 1.0f / BATCH_SIZE);
  for (int batch = 0; batch < nBatches; batch++) {
    // The last batch may be smaller than BATCH_SIZE.
    int actBatchSize = min(nImgs - batch * BATCH_SIZE, BATCH_SIZE);
    vector<Mat> imgs_b;
    if (actBatchSize >= BATCH_SIZE) {
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE,
          imgs.begin() + (batch + 1) * BATCH_SIZE);
    } else {
      // Pad the final batch with copies of the first image so AddMatVector
      // always receives a full batch; the padded outputs are discarded below.
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE, imgs.end());
      for (int j = actBatchSize; j < BATCH_SIZE; j++)
        imgs_b.push_back(imgs[0]);
    }
    vector<int> dvl(BATCH_SIZE, 0);
    boost::dynamic_pointer_cast<caffe::MemoryDataLayer<Dtype>>(
        caffe_test_net.layers()[0])->AddMatVector(imgs_b, dvl);
    Dtype loss = 0.0f;
    caffe_test_net.ForwardPrefilled(&loss);
    const boost::shared_ptr<Blob<Dtype>> feat = caffe_test_net.blob_by_name(LAYER);
    // feat->count() / feat->num() is the per-image feature dimension.
    for (int i = 0; i < actBatchSize; i++) {
      Dtype* feat_data = feat->mutable_cpu_data() + feat->offset(i);
      output.push_back(vector<Dtype>(feat_data, feat_data + feat->count() / feat->num()));
    }
    LOG(INFO) << "Batch " << (batch + 1) << "/" << nBatches << " (" << actBatchSize << " images) done";
  }
}
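A hypothetical caller for computeFeatures, assuming Dtype = float, a deploy net whose first layer is a MemoryDataLayer with batch size 10, and that computeFeatures from above is in scope. File, image, and blob names ("deploy.prototxt", "fc7", etc.) are placeholders.

#include <opencv2/opencv.hpp>
#include "caffe/net.hpp"

int main() {
  // Load the deploy net and its trained weights (paths are placeholders).
  caffe::Net<float> net("deploy.prototxt", caffe::TEST);
  net.CopyTrainedLayersFrom("weights.caffemodel");

  std::vector<cv::Mat> imgs;
  imgs.push_back(cv::imread("image.jpg"));

  std::vector<std::vector<float> > feats;
  // 10 must match the MemoryDataLayer batch size in deploy.prototxt.
  computeFeatures(net, imgs, "fc7", 10, feats);
  return 0;
}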
Example #3
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientNet(
    Net<Dtype>& net, const vector<Blob<Dtype>*>& input) {
  const vector<shared_ptr<Layer<Dtype> > >& layers = net.layers();
  const vector<vector<Blob<Dtype>*> >& bottom_vecs = net.bottom_vecs();
  const vector<vector<Blob<Dtype>*> >& top_vecs = net.top_vecs();
  for (int_tp i = 0; i < layers.size(); ++i) {
    // Re-run the forward pass before each layer's check: the numeric
    // gradient check perturbs blob values in place.
    net.Forward(input);
    LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name();
    CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]);
  }
  }
}
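Usage mirrors Caffe's per-layer gradient checks; a hedged sketch, assuming the stock GradientChecker(stepsize, threshold) constructor. Note that stock Caffe keeps GradientChecker under src/caffe/test/, so the include path below is an assumption about the build setup.

#include "caffe/test/test_gradient_check_util.hpp"

// Hedged sketch: run the whole-net check from a test body; the net and its
// input blobs are assumed to come from the test fixture.
void check_whole_net(caffe::Net<float>& net,
                     const std::vector<caffe::Blob<float>*>& inputs) {
  // Stock constructor: GradientChecker(stepsize, threshold).
  caffe::GradientChecker<float> checker(1e-2, 1e-3);
  checker.CheckGradientNet(net, inputs);
}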
Example #4
static void net_set_input_arrays(MEX_ARGS) {
  mxCHECK(nrhs == 4 && mxIsStruct(prhs[0]) && mxIsSingle(prhs[1]) && mxIsSingle(prhs[2]),
      "Usage: caffe_('net_set_input_arrays', hNet, new_data_in, new_data_out, layer_idx)");

  int layer_idx = *((int*)mxGetData(prhs[3]));

  // Check that the addressed layer is an input MemoryDataLayer.
  Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
  shared_ptr<MemoryDataLayer<float> > md_layer =
    boost::dynamic_pointer_cast< MemoryDataLayer<float> >(net->layers()[layer_idx]);
  if (!md_layer) {
    mxERROR("set_input_arrays may only be called if the layer is a MemoryDataLayer");
  }

  // mxGetDimensions returns mwSize*, not int*.
  const mwSize* data_dims_array = mxGetDimensions(prhs[1]);
  const mwSize* label_dims_array = mxGetDimensions(prhs[2]);

  if (data_dims_array[0] != label_dims_array[0]) {
    mxERROR("data and labels must have the same first dimension");
  }
  if (data_dims_array[0] % md_layer->batch_size() != 0) {
    mxERROR("first dimension of input arrays must be a multiple of batch size");
  }


  const int data_numel = static_cast<int>(mxGetNumberOfElements(prhs[1]));
  const int labels_numel = static_cast<int>(mxGetNumberOfElements(prhs[2]));

  // MemoryDataLayer::Reset does not take ownership, so these buffers must
  // outlive the layer's use of them (they are leaked on repeated calls).
  float* input_data = (float*) malloc(data_numel * sizeof(float));
  float* input_labels = (float*) malloc(labels_numel * sizeof(float));

  caffe_copy(data_numel, (float*)(mxGetData(prhs[1])), input_data);
  caffe_copy(labels_numel, (float*)(mxGetData(prhs[2])), input_labels);

  md_layer->Reset(input_data, input_labels, static_cast<int>(data_dims_array[0]));
}
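Because MemoryDataLayer::Reset only borrows the memory handed to it, the staging buffers above leak on repeated calls. One way to bound that leak, sketched under the same no-ownership assumption; stage_input() is a hypothetical helper, not part of the wrapper above.

#include <cstdlib>
#include "caffe/layers/memory_data_layer.hpp"
#include "caffe/util/math_functions.hpp"

// Keep the staging buffers in statics and free the previous allocation
// before replacing it, so at most one copy per buffer is ever live.
static float* s_data = NULL;
static float* s_labels = NULL;

void stage_input(caffe::MemoryDataLayer<float>* layer,
                 const float* data, int data_numel,
                 const float* labels, int labels_numel, int n) {
  free(s_data);
  free(s_labels);
  s_data = (float*) malloc(data_numel * sizeof(float));
  s_labels = (float*) malloc(labels_numel * sizeof(float));
  caffe::caffe_copy(data_numel, data, s_data);
  caffe::caffe_copy(labels_numel, labels, s_labels);
  layer->Reset(s_data, s_labels, n);  // layer borrows, does not own
}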
Example #5
// Usage: caffe_('net_get_attr', hNet)
static void net_get_attr(MEX_ARGS) {
	mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
		"Usage: caffe_('net_get_attr', hNet)");
	Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
	const int net_attr_num = 6;
	const char* net_attrs[net_attr_num] = { "hLayer_layers", "hBlob_blobs",
		"input_blob_indices", "output_blob_indices", "layer_names", "blob_names" };
	mxArray* mx_net_attr = mxCreateStructMatrix(1, 1, net_attr_num,
		net_attrs);
	mxSetField(mx_net_attr, 0, "hLayer_layers",
		ptr_vec_to_handle_vec<Layer<float> >(net->layers()));
	mxSetField(mx_net_attr, 0, "hBlob_blobs",
		ptr_vec_to_handle_vec<Blob<float> >(net->blobs()));
	mxSetField(mx_net_attr, 0, "input_blob_indices",
		int_vec_to_mx_vec(net->input_blob_indices()));
	mxSetField(mx_net_attr, 0, "output_blob_indices",
		int_vec_to_mx_vec(net->output_blob_indices()));
	mxSetField(mx_net_attr, 0, "layer_names",
		str_vec_to_mx_strcell(net->layer_names()));
	mxSetField(mx_net_attr, 0, "blob_names",
		str_vec_to_mx_strcell(net->blob_names()));
	plhs[0] = mx_net_attr;
}
Example #6
EXPORT int caffe_net_layers_size(void *netAnon)
{
	Net<float> *net = (Net<float> *)netAnon;
	return net->layers().size();
}
Example #7
EXPORT void *caffe_net_layer(void *netAnon, int i)
{
	Net<float> *net = (Net<float> *)netAnon;
	// Non-owning pointer: the net's shared_ptr keeps the layer alive.
	return net->layers()[i].get();
}
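A hedged usage sketch for the flat C API of Examples #6 and #7; the net handle is assumed to come from a matching exported constructor elsewhere in the same wrapper (not shown on this page).

#include <stdio.h>

void dump_layer_pointers(void *netAnon)
{
	int n = caffe_net_layers_size(netAnon);
	for (int i = 0; i < n; i++) {
		// caffe_net_layer returns a non-owning pointer: the shared_ptr
		// inside the net keeps the layer alive, so the pointer is only
		// valid while the net itself exists.
		printf("layer %d at %p\n", i, caffe_net_layer(netAnon, i));
	}
}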
Example #8
static void net_get_loss_diff(MEX_ARGS) {
  mxCHECK(nrhs == 2 && mxIsStruct(prhs[0]),
      "Usage: caffe_('net_get_loss_diff', hNet, layer_index)");
  Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);

  // layer_index is 1-based (MATLAB convention); convert to 0-based.
  int layer_idx = *((int*)mxGetData(prhs[1]));

  shared_ptr<EuclideanLossLayer<float> > loss =
      boost::dynamic_pointer_cast<EuclideanLossLayer<float> >(net->layers()[layer_idx - 1]);
  if (!loss) {
    const vector<string> net_layer_names = net->layer_names();
    mexPrintf("layer_name: %s\n", net_layer_names[layer_idx - 1].c_str());
    mxERROR("net_get_loss_diff may only be called if the layer is an EuclideanLossLayer");
  } else {
    mexPrintf("Found loss layer...\n");
  }

  Blob<float> tmp_loss_diff(loss->Get_diff_shape(0), loss->Get_diff_shape(1),
      loss->Get_diff_shape(2), loss->Get_diff_shape(3));
  Blob<float>* actual_diff = (Blob<float>*)loss->Get_internal_diff();
  tmp_loss_diff.CopyFrom(*actual_diff);
  plhs[0] = blob_to_mx_mat(&tmp_loss_diff, DATA);
}
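Get_diff_shape() and Get_internal_diff() are custom extensions, not stock Caffe. With an unmodified Caffe the same gradient can be read from the loss layer's bottom blob diff after a backward pass; a minimal sketch, assuming the net's input has already been filled and loss_layer_idx is 0-based.

#include <cstdio>
#include "caffe/net.hpp"

void print_loss_diff(caffe::Net<float>& net, int loss_layer_idx) {
  net.ForwardPrefilled();
  net.Backward();
  // bottom_vecs()[i] holds the bottom blobs of layer i; for a Euclidean loss,
  // blob 0 is the prediction and its diff is d(loss)/d(prediction).
  const caffe::Blob<float>* pred = net.bottom_vecs()[loss_layer_idx][0];
  const float* diff = pred->cpu_diff();
  printf("first diff element: %f (of %d elements)\n", diff[0], pred->count());
}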