Example #1
// Read the 2-D float dataset `name` from `h5f` into `image_out`; a non-empty
// `roi` selects a matching hyperslab of the file dataspace instead of the
// whole image.
void read_hdf5_image(H5File h5f, Mat &image_out, const char *name, const Rect &roi=Rect(0,0,0,0))
{
    DataSet dataset = h5f.openDataSet(name);
    DataSpace dspace = dataset.getSpace();
    assert (dspace.getSimpleExtentNdims() == 2);
    hsize_t dims[2];
    dspace.getSimpleExtentDims(dims);
    // an empty ROI means read the whole dataset
    if ((roi.width == 0) && (roi.height == 0)) {
        image_out.create(dims[0], dims[1], CV_32F);
        dspace.selectAll();
    } else {
        image_out.create(roi.height, roi.width, CV_32F);
        hsize_t _offset[2], _size[2];
        _offset[0] = roi.y; _offset[1] = roi.x;
        _size[0] = roi.height; _size[1] = roi.width;
        dspace.selectHyperslab(H5S_SELECT_SET, _size, _offset);
    }
    
    DataSpace imspace;
    float *imdata;
    // contiguous buffer: the memory dataspace is simply the 2-D image extent
    if (image_out.isContinuous()) {
        dims[0] = image_out.size().height; dims[1] = image_out.size().width;
        imspace = DataSpace(2, dims);
        imspace.selectAll();
        imdata = image_out.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image_out.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image_out.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image_out.size().height; im_size[1] = image_out.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        // rewind the data pointer to the parent matrix origin so the
        // hyperslab offsets computed above line up with the buffer
        imdata = image_out.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.read(imdata, PredType::NATIVE_FLOAT, imspace, dspace);
}
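
A minimal usage sketch for the reader above; the file name "data.h5" and the dataset name "image" are hypothetical:

// Open read-only, load the whole image, then just a 100x100 window at (10, 20).
H5File h5f("data.h5", H5F_ACC_RDONLY);
Mat full, window;
read_hdf5_image(h5f, full, "image");
read_hdf5_image(h5f, window, "image", Rect(10, 20, 100, 100));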
Example #2
void write_hdf5_image(H5File h5f, const char *name, const Mat &im)
{
    DSetCreatPropList cparms;
    hsize_t chunk_dims[2] = {256, 256};
    hsize_t dims[2];
    dims[0] = im.size().height;
    dims[1] = im.size().width;
  
    if (chunk_dims[0] > dims[0]) {
        chunk_dims[0] = dims[0];
    }
    if (chunk_dims[1] > dims[1]) {
        chunk_dims[1] = dims[1];
    }

    cparms.setChunk(2, chunk_dims);
    cparms.setShuffle();   // byte-shuffle filter improves compression of floats
    cparms.setDeflate(5);  // gzip level 5

    DataSet dataset = h5f.createDataSet(name, PredType::NATIVE_FLOAT,
                                        DataSpace(2, dims, dims),
                                        cparms);

    Mat image;
    if (im.type() != CV_32F)
        im.convertTo(image, CV_32F);
    else
        image = im;
    
    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same extent as the dataset's file space
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
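
A hedged round-trip sketch pairing this writer with the reader from Example #1 (the file and dataset names are illustrative):

// Write a float image, then read it back; the 256x256 chunking and gzip
// level 5 come from the property list configured in write_hdf5_image.
H5File h5f("data.h5", H5F_ACC_TRUNC);
Mat im = Mat::eye(512, 512, CV_32F);
write_hdf5_image(h5f, "image", im);
Mat check;
read_hdf5_image(h5f, check, "image");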
Example #3
void write_feature(H5File h5f, const Mat &image_in, const char *name)
{
    // make sure the size matches the file-scope `imsize` (defined elsewhere)
    assert (imsize == image_in.size());

    // make sure the image is in native float
    Mat image;
    if (image_in.type() != CV_32F)
        image_in.convertTo(image, CV_32F);
    else
        image = image_in;
    
    DataSet dataset = create_dataset(h5f, name);

    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same extent as the dataset's file space
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
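
Example #3 depends on two names defined elsewhere in its translation unit: a file-scope `imsize` and a `create_dataset` helper, neither of which is shown. A plausible sketch of the helper, modeled on the chunked, shuffled, deflate-compressed layout from Example #2 (the body is an assumption, not the original implementation):

// Hypothetical reconstruction of the create_dataset helper used above,
// mirroring the property-list setup from Example #2.
static Size imsize;  // assumed file-scope image size checked by write_feature

DataSet create_dataset(H5File h5f, const char *name)
{
    DSetCreatPropList cparms;
    hsize_t chunk_dims[2] = {256, 256};
    hsize_t dims[2] = {(hsize_t)imsize.height, (hsize_t)imsize.width};
    if (chunk_dims[0] > dims[0]) chunk_dims[0] = dims[0];
    if (chunk_dims[1] > dims[1]) chunk_dims[1] = dims[1];
    cparms.setChunk(2, chunk_dims);
    cparms.setShuffle();
    cparms.setDeflate(5);
    return h5f.createDataSet(name, PredType::NATIVE_FLOAT,
                             DataSpace(2, dims, dims), cparms);
}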
Example #4
bool Wrapper_i_hdf::priv_init(int fr_count)
{ 
  if(locked_)
    return false;
  
  try
  {
    H5File * file = new H5File( file_name_, H5F_ACC_RDONLY );
    if(two_d_data_)
    {
      const string nop_str = "number-of-planes";
      Group g = file->openGroup("/");
      Attr_list_hdf atr_list(&g);
      if(!atr_list.contains_attr(nop_str))
	throw logic_error("wrapper_i_hdf: number-of-planes not found in file");
      atr_list.get_value(nop_str,frame_count_);
    }
    else
    {
      /**
	 @todo Handle this better; without 2-D data there is nothing
	 to infer a frame count from, so default to a single frame.
       */
      frame_count_ = 1;
    }

    if(fr_count != 0)
    {
      if(fr_count + start_ > frame_count_)
	throw runtime_error("wrapper_i_hdf: asking for too many frames");
      frame_count_ = fr_count;
    }

    // logic to set up data maps and data storage
    int i_count = 0;
    int f_count = 0;
    int c_count = 0;
    
    for(set<pair<D_TYPE,int> >::iterator it = data_types_.begin();
	it != data_types_.end();++it)
    {
      D_TYPE cur = (*it).first;
      
      switch(v_type(cur))
      {
      case V_INT:
	data_i_.push_back(vector<int*>(frame_count_));
	d_mapi_.set_lookup(cur,i_count++);
	break;
      case V_FLOAT:
	data_f_.push_back(vector<float*>(frame_count_));
	d_mapf_.set_lookup(cur,f_count++);
	break;
      case V_COMPLEX:
	data_c_.push_back(vector<complex<float>*>(frame_count_));
	d_mapc_.set_lookup(cur,c_count++);
	break;
      case V_STRING:
      case V_BOOL:
      case V_GUID:
      case V_TIME:
      case V_UINT:
      case V_ERROR:
	throw logic_error("wrapper_i_hdf: The data type should not have been " + VT2str_s(v_type(cur)));
      }
    }

    frame_c_.reserve(frame_count_);
    if(two_d_data_)
      frame_zdata_.resize(frame_count_);

    // set the size of the md_store
    set_Md_store_size(frame_count_);
    
    // fill in data
    // assume that the frames run from 0 to frame_count_
    for(unsigned int j = 0; j<frame_count_;++j)
    {
      string frame_name = format_name(j+start_);
      Group * frame = new Group(file->openGroup(frame_name));
      
      Attr_list_hdf g_attr_list(frame);
      
      set_Md_store(j,new Md_store(g_attr_list));
      
      
      if(two_d_data_)
      {
	if(!g_attr_list.contains_attr("z-position"))
	  throw logic_error("wrapper_i_hdf: z-position not found");
	g_attr_list.get_value("z-position",frame_zdata_[j]);
      }
      
      
      for(set<pair<D_TYPE,int> >::iterator it = data_types_.begin();
	  it != data_types_.end();++it)
      {
	
	if(two_d_data_ && ((*it).first)==utilities::D_ZPOS)
	  continue;

	// open the dataset for this (type, index) pair
	DataSet * dset = new DataSet(frame->openDataSet(format_dset_name((*it).first,(*it).second)));
	DataSpace dspace = dset-> getSpace();
	dspace.selectAll();
	int part_count = dspace.getSimpleExtentNpoints();
	
	// if this is the first data set for this frame, set the particle count
	if(frame_c_.size()==j)
	  frame_c_.push_back(part_count);
	// if part_count is smaller than a previous dataset's, take the
	// smaller number.  This shouldn't leak memory, as the bare
	// arrays are never returned.
	else if(frame_c_.at(j) > part_count)
	  frame_c_.at(j) = part_count;
	// if the current set has more entries than a previous set, keep
	// the old value.  These checks are a kludge; the size mismatch
	// should really be handled when the data is written out.
	else if(frame_c_.at(j) < part_count)
	  continue;
	// if(frame_c_.at(j) != part_count)
	//   throw runtime_error("wrapper_i_hdf: data sets different sizes");
	D_TYPE cur_type = (*it).first;
	
	switch(v_type(cur_type))
	{
	case V_INT:
	  data_i_.at(d_mapi_(cur_type)).at(j) = new int [part_count];
	  dset->read(data_i_.at(d_mapi_(cur_type)).at(j),PredType::NATIVE_INT);
	  
	  break;
	case V_FLOAT:
	  data_f_.at(d_mapf_(cur_type)).at(j) = new float [part_count];
	  dset->read(data_f_.at(d_mapf_(cur_type)).at(j),PredType::NATIVE_FLOAT);
	  break;
	case V_COMPLEX:
	  throw logic_error("not implemented yet");
	  
	  break;
	case V_STRING:
	case V_BOOL:
	case V_GUID:
	case V_TIME:
	case V_UINT:
	case V_ERROR:
	  throw logic_error("wrapper_i_hdf: The data type should not have been " + VT2str_s(v_type(cur_type)));
      	}
	
	// clean up hdf stuff
	dset->close();
	delete dset;
	dset = NULL;
      }
      frame->close();
      delete frame;
      frame = NULL;
    }
    file->close();
    delete file;
    file = NULL;

    // shift all of the z by the minimum to start at zero
    if(two_d_data_)
    {
      float min = frame_zdata_[0];
      for(unsigned int j = 0; j<frame_count_;++j)
	if(frame_zdata_[j]<min)
	  min = frame_zdata_[j];
      for(unsigned int j = 0; j<frame_count_;++j)
	frame_zdata_[j] -= min;
    }
  }
  catch(Exception & e)
  {
    // print the HDF5 error, then surface it as a runtime_error
    e.printError();
    throw runtime_error("wrapper_i_hdf: constructor error");
  }
  
  for(unsigned int j = 0; j<frame_count_;++j)
    total_part_count_ += frame_c_.at(j);

  return true;
}
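
From the reads performed above one can infer the file layout priv_init expects: a root-level "number-of-planes" attribute, one group per frame (named by format_name), each carrying a "z-position" attribute and one 1-D dataset per (type, index) pair (named by format_dset_name). A hedged sketch of writing that layout; the literal group and dataset names are guesses, since format_name and format_dset_name are not shown:

// Hypothetical writer for the layout priv_init reads; "frame_%d" and
// "x_0" stand in for whatever format_name/format_dset_name produce.
H5File file("frames.h5", H5F_ACC_TRUNC);
int nplanes = 2;
Group root = file.openGroup("/");
root.createAttribute("number-of-planes", PredType::NATIVE_INT, DataSpace(H5S_SCALAR))
    .write(PredType::NATIVE_INT, &nplanes);
for(int j = 0; j < nplanes; ++j)
{
  Group frame = file.createGroup("frame_" + to_string(j));
  float z = 0.5f * j;  // per-plane z-position read back by priv_init
  frame.createAttribute("z-position", PredType::NATIVE_FLOAT, DataSpace(H5S_SCALAR))
       .write(PredType::NATIVE_FLOAT, &z);
  hsize_t n = 100;     // particles in this frame
  vector<float> x(n, 0.0f);
  frame.createDataSet("x_0", PredType::NATIVE_FLOAT, DataSpace(1, &n))
       .write(x.data(), PredType::NATIVE_FLOAT);
}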
Example #5
void Generic_wrapper_hdf::get_dset_info(std::vector<int> & dims,V_TYPE& vt ,const std::string & dset_name) const
{
  if (!(wrapper_open_))
    throw runtime_error("wrapper must be open to add a dataset");
  
  dims.clear();

  // get data set
  DataSet dset;
  // open data set  
  if(!group_open_ || dset_name[0] == '/')
  {
    dset = file_->openDataSet(dset_name);
  }
  else if(group_)
  {
    dset = group_->openDataSet(dset_name);
  }
  else
    throw logic_error("generic_wrapper_hdf:: can't add to a closed group");

  // identify type
  H5T_class_t dset_class_t = dset.getTypeClass();
  H5T_sign_t sign;
  switch(dset_class_t)
  {
  case H5T_INTEGER:
    sign = dset.getIntType().getSign();
    if(sign == H5T_SGN_2)
      vt = V_INT;
    else if(sign == H5T_SGN_NONE)
      vt = V_UINT;
    else
      vt = V_ERROR;
    break;
  case H5T_FLOAT:
    vt = V_FLOAT;
    break;
  case H5T_STRING:
  case H5T_TIME:
  case H5T_BITFIELD:
  case H5T_OPAQUE:
  case H5T_COMPOUND:
  case H5T_REFERENCE:
  case H5T_ENUM:
  case H5T_VLEN:
  case H5T_ARRAY:
  case H5T_NO_CLASS:
  case H5T_NCLASSES:
    vt = V_ERROR;
    break;
  }
  
  // get the data space
  DataSpace dataspace = dset.getSpace();
  // select everything
  dataspace.selectAll();
  // get the rank
  hsize_t rank = dataspace.getSimpleExtentNdims();
  // make dims the right size
  vector <hsize_t> tdims;
  tdims.resize(rank);
  // get the dimensionality 
  dataspace.getSimpleExtentDims(tdims.data(),NULL);
  // copy to the return vector
  dims.resize(rank);
  for(hsize_t j = 0; j<rank;++j)
    dims[j] = (int)tdims[j];
}
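
A usage sketch, assuming an already-open Generic_wrapper_hdf named wrapper and the hypothetical dataset path from the sketch above:

// Query rank, extent, and element class before deciding how to read.
vector<int> dims;
V_TYPE vt;
wrapper.get_dset_info(dims, vt, "/frame_0/x_0");
if(vt == V_FLOAT && dims.size() == 1)
  cout << "1-D float dataset, " << dims[0] << " entries" << endl;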
Example #6
template <class T>
void Generic_wrapper_hdf::get_dset_priv(vector<T> & data, std::vector<unsigned int> & dims, const std::string & dset_name, const DataType & mtype) const
{
  if (!(wrapper_open_))
    throw runtime_error("wrapper must be open to read a dataset");
  
  dims.clear();
  data.clear();
  
  // get data set
  DataSet dset;
  // open data set  
  try
  {
    if(!group_open_ || dset_name[0] == '/')
    {
      if(file_)
      {
        try
        {
          dset = file_->openDataSet(dset_name);
        }
        catch(FileIException &e)
        {
          throw runtime_error(e.getDetailMsg());
        }
      }
      else
        throw runtime_error("there is no open file");
    }
    else if(group_)
    {
      dset = group_->openDataSet(dset_name);
    }
    else
      throw logic_error("generic_wrapper_hdf:: can't read from a closed group");
  }
  catch(Exception &e)
  {
    std::string er_msg = "error opening hdf \n" + e.getDetailMsg();
    throw runtime_error(er_msg);
  }
  
  // check type
  H5T_class_t dset_class_t = dset.getTypeClass();

  H5T_class_t mem_class_t = mtype.getClass();
  
  if(dset_class_t != mem_class_t)
    throw runtime_error("Data type miss-match");
  
  // if(mem_class_t == H5T_INTEGER)
  // {
  //   IntType mem_int = IntType(mtype);
  //   H5T_sign_t dsign = dset.getIntType().getSign();
  //   H5T_sign_t msign = mem_int.getSign();

  //   if(dsign  != msign)
  //     throw runtime_error("int signness miss-match ");

  // }
  
  
  // get the data space
  DataSpace dataspace = dset.getSpace();
  // select everything
  dataspace.selectAll();
  // get the rank
  hsize_t rank = dataspace.getSimpleExtentNdims();
  // make dims the right size
  vector <hsize_t> tdims;
  tdims.resize(rank);

  // get the dimensionality 
  dataspace.getSimpleExtentDims(tdims.data(),NULL);
  // copy to the return vector
  dims.resize(rank);

  for(hsize_t j = 0; j<rank;++j)
    dims[j] = (unsigned int)tdims[j];


  // get the number of entries
  hsize_t total = dataspace.getSimpleExtentNpoints();
  // resize the data vector
  data.resize(total);
  // read the data out
  dset.read( data.data(), mtype, dataspace, dataspace );
}
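
get_dset_priv is presumably a private member template reached through typed public accessors; calling it directly here for illustration, the memory DataType must match T (NATIVE_FLOAT for float, NATIVE_INT for int). The wrapper instance and dataset path are assumptions:

// Read an entire float dataset into a vector, recording its shape.
vector<float> data;
vector<unsigned int> dims;
wrapper.get_dset_priv(data, dims, "/frame_0/x_0", PredType::NATIVE_FLOAT);
// data.size() equals the product of the entries of dims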