Code Example #1
 template <typename ArrayType>
 void write_array (group_or_file f, std::string const & name, ArrayType const & A, bool C_reorder = true) {
  try {
   if (h5::exists(f, name)) f->unlink( name.c_str());  // put some option here ?
   DataSet ds;
   if (C_reorder) {
    BOOST_AUTO(C, make_const_cache(A,Option::C()));
    ds = f->createDataSet( name.c_str(), data_type_file(typename ArrayType::value_type()), data_space(C.view()) );
    ds.write( data(C.view()), data_type_mem(A), data_space(C.view()) );
   }
   else {
    ds = f->createDataSet( name.c_str(), data_type_file(typename ArrayType::value_type()), data_space(A) );
    ds.write( data(A), data_type_mem(A), data_space(A) );
   }
   // if the value type is complex, add the __complex__ attribute for Python compatibility
   if (boost::is_complex<typename ArrayType::value_type>::value)  write_attribute(ds,"__complex__","1");
  }
  TRIQS_ARRAYS_H5_CATCH_EXCEPTION;
 }
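A minimal call sketch (assumptions: a 2-D TRIQS array and an already-open
HDF5 file; how the H5::H5File handle converts to group_or_file depends on the
surrounding TRIQS code and is assumed here):

 triqs::arrays::array<double,2> A(2,2);      // data to store
 H5::H5File file("data.h5", H5F_ACC_TRUNC);  // HDF5 C++ API file handle
 write_array(&file, "A", A);                 // stores in C (row-major) order by default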
Code Example #2
File: dtk_adapter.C  Project: ZJLi2013/libmesh
Teuchos::RCP<DataTransferKit::FieldManager<DTKAdapter::FieldContainerType> >
DTKAdapter::get_values_to_fill(std::string var_name)
{
  if(values_to_fill.find(var_name) == values_to_fill.end())
  {
    Teuchos::ArrayRCP<double> data_space(num_local_nodes);
    Teuchos::RCP<FieldContainerType> field_container = Teuchos::rcp(new FieldContainerType(data_space, 1));
    values_to_fill[var_name] = Teuchos::rcp(new DataTransferKit::FieldManager<FieldContainerType>(field_container, comm));
  }

  return values_to_fill[var_name];
}
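Note the memoization pattern: values_to_fill (presumably a std::map keyed by
variable name) is populated on first request, so later calls for the same
variable reuse the existing Teuchos::ArrayRCP-backed field container instead
of allocating a new one.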
Code Example #3
 template <typename ArrayType>
 void read_array (group_or_file f, std::string const & name, ArrayType & A, bool C_reorder = true) {
  typedef typename ArrayType::value_type V;
  if (!h5::exists(f, name))  TRIQS_RUNTIME_ERROR << "no such dataset : "<<name <<" in file ";
  try {
   DataSet ds = f->openDataSet( name.c_str() );
   DataSpace dataspace = ds.getSpace();
   static const unsigned int Rank =  ArrayType::rank + (boost::is_complex<typename ArrayType::value_type>::value ? 1 : 0);
   int rank = dataspace.getSimpleExtentNdims();
   if (rank != Rank) TRIQS_RUNTIME_ERROR << "triqs::array::h5::read. Rank mismatch : the array has rank = "
    <<Rank<<" while the array stored in the hdf5 file has rank = "<<rank;
   mini_vector<hsize_t,Rank> dims_out;
   dataspace.getSimpleExtentDims( &dims_out[0], NULL);
   mini_vector<size_t, ArrayType::rank> d2;
   for (size_t u=0; u<ArrayType::rank; ++u) d2[u] = dims_out[u];
   resize_or_check(A, d2 );
   if (C_reorder) {
    BOOST_AUTO(C,  make_cache(A, Option::C() ));
    ds.read( data(C.view()), data_type_mem(C.view()), data_space(C.view()) , dataspace );
   }
   else { ds.read( data(A), data_type_mem(A), data_space(A) , dataspace ); }
  }
  TRIQS_ARRAYS_H5_CATCH_EXCEPTION;
 }
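read_array is the mirror of write_array in Example #1. A matching call
sketch, under the same assumptions about the file handle:

 triqs::arrays::array<double,2> B;
 H5::H5File file("data.h5", H5F_ACC_RDONLY);
 read_array(&file, "A", B);  // resize_or_check sizes B from the dataset, then reads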
Code Example #4
/*!
 * \brief Get the wave target space.
 */
Teuchos::RCP<DataTransferKit::FieldManager<WaveAdapter::FieldType> >
WaveAdapter::getTargetSpace( const RCP_Wave& wave )
{
    // Get the wave data vector we will write into.
    Teuchos::RCP<std::vector<double> > damping_space = wave->get_damping();

    // The data we are transferring has 1 dimension.
    int field_dim = 1;

    // Build an ArrayRCP from the data vector.
    Teuchos::ArrayRCP<double> data_space( &(*damping_space)[0], 0, 
					  damping_space->size(), false );

    // Build a field container from the data space.
    Teuchos::RCP<FieldType> field_container =
	Teuchos::rcp( new FieldType( data_space, field_dim ) );
    
    // Return a field manager for the target data space.
    return Teuchos::rcp( 
	new DataTransferKit::FieldManager<FieldType>( field_container, 
						      wave->get_comm() ) );
}
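The four-argument Teuchos::ArrayRCP constructor used above takes (pointer,
lower offset, size, has_ownership); passing false for has_ownership means the
ArrayRCP merely views the wave's damping vector and will not free it, so the
wave object must outlive the returned field manager.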
Code Example #5
  FullModelComposition<Vec,Mat>::FullModelComposition( int argc, char** argv,
                                                       const QUESO::BaseEnvironment& queso_env,
                                                       const GetPot& model_input )
    : _model(ModelBuilder<Vec,Mat>::build_model(queso_env,model_input)),
      _comm_handler(queso_env.subComm().Comm(),
                    model_input.vector_variable_size("Likelihood/datasets") )
  {
    // Grab the datasets we'll be working with
    unsigned int n_datasets = model_input.vector_variable_size("Likelihood/datasets");

    std::vector<std::string> datasets(n_datasets);
    for( unsigned int d = 0; d < n_datasets; d++ )
      {
        datasets[d] = model_input( "Likelihood/datasets", "DIE!", d );
      }

    // This is the dataset the current set of processors is going to work on
    int dataset_index = this->_comm_handler.get_dataset_index();

    // Input for this dataset
    _forward_run_input.reset( new GetPot(datasets[dataset_index]) );


    // Setup data space, 2 datapoints per dataset
    unsigned int n_datapoints = 2*n_datasets;
    QUESO::VectorSpace<Vec,Mat> data_space( queso_env, "data_", n_datapoints, NULL);

    _observations.reset( data_space.newVector() );
    _covariance.reset( data_space.newVector() );

    // Now parse data values and the corresponding covariances
    // Each processor parses its own dataset
    // Then we'll gather/broadcast to everyone
    std::vector<double> local_values(2);
    std::vector<double> all_values(n_datapoints);

    // Convention, mass_loss is first, then avg_N
    local_values[0] = (*_forward_run_input)("MassLossLikelihood/data_value", 0.0);
    local_values[1] = (*_forward_run_input)("AverageNLikelihood/data_value", 0.0);

    if( _comm_handler.get_inter0_rank() >= 0 )
      MPI_Gather( &local_values[0], 2, MPI_DOUBLE,
                  &all_values[0], 2, MPI_DOUBLE, 0,
                  _comm_handler.get_inter_chain_0_comm() );

    MPI_Bcast( &all_values[0], n_datapoints, MPI_DOUBLE,
               0, _comm_handler.get_inter_chain_comm() );

    for( unsigned int i = 0; i < n_datapoints; i++ )
      (*_observations)[i] = all_values[i];

    local_values[0] = (*_forward_run_input)("MassLossLikelihood/sigma", -1.0);
    local_values[1] = (*_forward_run_input)("AverageNLikelihood/sigma", -1.0);

    if( _comm_handler.get_inter0_rank() >= 0 )
      MPI_Gather( &local_values[0], 2, MPI_DOUBLE,
                  &all_values[0], 2, MPI_DOUBLE, 0,
                  _comm_handler.get_inter_chain_0_comm() );

    MPI_Bcast( &all_values[0], n_datapoints, MPI_DOUBLE,
               0, _comm_handler.get_inter_chain_comm() );

    for( unsigned int i = 0; i < n_datapoints; i++ )
      (*_covariance)[i] = all_values[i];


    // Now setup model to be evaluated on this set of processors
    // We do this last because of the UFO check in GRINS
    _model_evaluator.reset( new FullModelEvaluator<Vec,Mat>(argc,argv,
                                                            queso_env,
                                                            *(_forward_run_input.get()),
                                                            _comm_handler.get_split_chain_comm(),
                                                            *(_model.get())) );
  }
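The two MPI_Gather/MPI_Bcast pairs implement a simple exchange across the
split communicators: each processor group parses the two values from its own
dataset, rank 0 of the inter-chain-0 communicator gathers them all, and the
broadcast over the inter-chain communicator then makes the complete
observation and covariance vectors available to every group.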
Code Example #6
File: ringbuffer.cpp  Project: sigmadrone/sigmadrone
size_t RingBuffer::write_size()
{
	unsigned int spacesize = data_space();
	unsigned int spacetoend = bufsize_ - wp_;
	return (spacesize < spacetoend) ? spacesize : spacetoend;
}
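write_size() returns the largest contiguous chunk writable at the current
write pointer wp_. The data_space() helper it calls is not part of this
excerpt; a plausible sketch for a single-producer/single-consumer ring buffer
that keeps one slot empty to distinguish full from empty (the member name rp_
and that convention are assumptions, not taken from the project):

size_t RingBuffer::data_space()
{
	// free space = capacity minus stored data, reserving one slot
	return (rp_ + bufsize_ - wp_ - 1) % bufsize_;
}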
Code Example #7
  SurrogateModelComposition<Vec,Mat>::SurrogateModelComposition(const QUESO::BaseEnvironment& queso_env,
                                                                const GetPot& model_input)
    : _model(ModelBuilder<Vec,Mat>::build_model(queso_env,model_input))
  {
    const QUESO::FullEnvironment& full_env = dynamic_cast<const QUESO::FullEnvironment&>( queso_env );

    // Grab the datasets we'll be working with
    unsigned int n_datasets = model_input.vector_variable_size("Likelihood/datasets");

    // Parse dataset names
    std::vector<std::string> dataset_names(n_datasets);
    for( unsigned int d = 0; d < n_datasets; d++ )
        dataset_names[d] = model_input( "Likelihood/datasets", "DIE!", d );

    // Setup forward model inputs
    _forward_model_inputs.resize(n_datasets,NULL);
    for( unsigned int d = 0; d < n_datasets; d++ )
      _forward_model_inputs[d] = new GetPot( dataset_names[d]+".in" );

    // Setup data space, 2 datapoints per dataset
    unsigned int n_datapoints = 2*n_datasets;
    QUESO::VectorSpace<Vec,Mat> data_space( queso_env, "data_", n_datapoints, NULL);

    _observations.reset( data_space.newVector() );
    _covariance.reset( data_space.newVector() );

    // Parse input files for observations, covariance
    for( unsigned int d = 0; d < n_datasets; d++ )
      {
        (*_observations)[2*d]   = (*_forward_model_inputs[d])("MassLossLikelihood/data_value", 0.0);
        (*_observations)[2*d+1] = (*_forward_model_inputs[d])("AverageNLikelihood/data_value", 0.0);

        double mass_loss_sigma = (*_forward_model_inputs[d])("MassLossLikelihood/sigma", 0.0);
        double avg_N_sigma = (*_forward_model_inputs[d])("AverageNLikelihood/sigma", 0.0);

        (*_covariance)[2*d]   = mass_loss_sigma*mass_loss_sigma;
        (*_covariance)[2*d+1] = avg_N_sigma*avg_N_sigma;
      }

    // Now set up the IO classes. These will hold the InterpolationSurrogateData we need for evaluation
    _interp_io.resize( n_datapoints );
    for( unsigned int d = 0; d < n_datasets; d++ )
      {
        std::string mass_loss_filename = dataset_names[d]+"_massloss.dat";
        std::string avg_N_filename = dataset_names[d]+"_avgN.dat";

        _interp_io[2*d] = new QUESO::InterpolationSurrogateIOASCII<Vec,Mat>;
        _interp_io[2*d+1] = new QUESO::InterpolationSurrogateIOASCII<Vec,Mat>;

        _interp_io[2*d]->read( mass_loss_filename, full_env, "");
        _interp_io[2*d+1]->read( avg_N_filename, full_env, "");
      }

    // Now setup interpolation surrogates
    _interp_surrogate.resize( n_datapoints );
    for( unsigned int d = 0; d < n_datasets; d++ )
      {
        _interp_surrogate[2*d] =
          new QUESO::LinearLagrangeInterpolationSurrogate<Vec,Mat>( _interp_io[2*d]->data() );

        _interp_surrogate[2*d+1] =
          new QUESO::LinearLagrangeInterpolationSurrogate<Vec,Mat>( _interp_io[2*d+1]->data() );
      }
  }
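Note that _forward_model_inputs, _interp_io, and _interp_surrogate all hold
raw pointers allocated with new; the class destructor (not shown here) is
presumably responsible for deleting them. In modern C++ these would more
idiomatically be std::unique_ptr elements.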