Code Example #1
File: DofMap.cpp  Project: MiroK/DolfinSurface
//-----------------------------------------------------------------------------
void DofMap::tabulate_coordinates(boost::multi_array<double, 2>& coordinates,
                                  const ufc::cell& ufc_cell) const
{
  // FIXME: This is a hack because UFC wants a double pointer for coordinates
  dolfin_assert(_ufc_dofmap);

  // Check dimensions
  if (coordinates.shape()[0] != cell_dimension(ufc_cell.index) ||
      coordinates.shape()[1] != _ufc_dofmap->geometric_dimension())
  {
    boost::multi_array<double, 2>::extent_gen extents;
    const std::size_t cell_dim = cell_dimension(ufc_cell.index);
    coordinates.resize(extents[cell_dim][_ufc_dofmap->geometric_dimension()]);
  }

  // Set vertex coordinates
  const std::size_t num_points = coordinates.size();
  std::vector<double*> coords(num_points);
  for (std::size_t i = 0; i < num_points; ++i)
    coords[i] = &(coordinates[i][0]);

  // Tabulate coordinates
  _ufc_dofmap->tabulate_coordinates(coords.data(),
                                    &ufc_cell.vertex_coordinates[0]);
}
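
The pattern above (resizing a boost::multi_array<double, 2> via extent_gen, then collecting per-row pointers to satisfy a double-pointer C interface) can be isolated into a minimal standalone sketch. fill_via_double_pointer and legacy_api below are illustrative names, not part of DOLFIN or UFC:

#include <boost/multi_array.hpp>
#include <cstddef>
#include <vector>

// Minimal sketch: expose a 2D multi_array as double** for a C-style API.
// 'legacy_api' is a hypothetical consumer of the double-pointer view.
void fill_via_double_pointer(boost::multi_array<double, 2>& a,
                             std::size_t rows, std::size_t cols)
{
  // Resize only when the shape does not match, as in the example above
  if (a.shape()[0] != rows || a.shape()[1] != cols)
  {
    boost::multi_array<double, 2>::extent_gen extents;
    a.resize(extents[rows][cols]);
  }

  // Each row is stored contiguously, so a pointer to its first element
  // gives a valid per-row double*
  std::vector<double*> row_ptrs(rows);
  for (std::size_t i = 0; i < rows; ++i)
    row_ptrs[i] = &a[i][0];

  // legacy_api(row_ptrs.data(), rows, cols);  // hypothetical call
}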
Code Example #2
File: MultiArray.cpp  Project: andrealani/coolfluid3
XmlNode add_multi_array_in( Map & map, const std::string & name,
                            const boost::multi_array<Real, 2> & array,
                            const std::string & delimiter,
                            const std::vector<std::string> & labels )
{
  cf_assert( map.content.is_valid() );
  cf_assert( !name.empty() );
  cf_assert( !map.check_entry(name) );
  cf_assert( !delimiter.empty() );

  std::string labels_str = boost::algorithm::join(labels, delimiter);

  XmlNode array_node = map.content.add_node( Protocol::Tags::node_array() );

  array_node.add_node( Protocol::Tags::type<std::string>(), labels_str );

  XmlNode data_node = array_node.add_node( Protocol::Tags::type<Real>() );

  Uint nb_rows = array.size();
  Uint nb_cols = 0;

  std::string str;
  std::string size;

  if(nb_rows != 0)
    nb_cols = array[0].size();

  size = to_str(nb_rows) + ':' + to_str(nb_cols);

  array_node.set_attribute( Protocol::Tags::attr_key(), name );

  data_node.set_attribute( "dimensions", to_str((Uint)array.dimensionality) );
  data_node.set_attribute( Protocol::Tags::attr_array_delimiter(), delimiter );
  data_node.set_attribute( Protocol::Tags::attr_array_size(), size);
  data_node.set_attribute( "merge_delimiter", to_str(true) ); // temporary

  // build the value string (ideas are welcome to avoid multiple
  // memory reallocations)
  for(Uint row = 0 ; row < nb_rows ; ++row)
  {
    for(Uint col = 0 ; col < nb_cols ; ++col)
      str += to_str( array[row][col] ) + delimiter;

    str += '\n'; // line break after each row
  }

  data_node.set_value(str.c_str());

  return array_node;
}
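
On the reallocation note in the comment above: one standard alternative is to stream into a std::ostringstream, which grows its buffer internally instead of reallocating a std::string on every append. A minimal sketch, independent of the coolfluid3 XML layer and using double in place of Real:

#include <boost/multi_array.hpp>
#include <sstream>
#include <string>

// Sketch: serialize a 2D multi_array row by row into a delimited string
std::string join_rows(const boost::multi_array<double, 2>& array,
                      const std::string& delimiter)
{
  std::ostringstream out;
  for (std::size_t row = 0; row < array.size(); ++row)
  {
    for (std::size_t col = 0; col < array[row].size(); ++col)
      out << array[row][col] << delimiter;
    out << '\n'; // line break after each row, as in the example above
  }
  return out.str();
}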
Code Example #3
object computeConvexDecomposition(const boost::multi_array<float, 2>& vertices,
                                  const boost::multi_array<int, 2>& indices,
                                  NxF32 skinWidth=0, NxU32 decompositionDepth=8,
                                  NxU32 maxHullVertices=64,
                                  NxF32 concavityThresholdPercent=0.1f,
                                  NxF32 mergeThresholdPercent=30.0f,
                                  NxF32 volumeSplitThresholdPercent=0.1f,
                                  bool useInitialIslandGeneration=true,
                                  bool useIslandGeneration=false)
{
    // Create the decomposition object, with the matching release function as deleter
    boost::shared_ptr<CONVEX_DECOMPOSITION::iConvexDecomposition> ic(CONVEX_DECOMPOSITION::createConvexDecomposition(),CONVEX_DECOMPOSITION::releaseConvexDecomposition);

    if( indices.size() > 0 ) {
        // Indexed mesh: each row of indices selects three rows of vertices
        FOREACHC(it,indices)
            ic->addTriangle(&vertices[(*it)[0]][0], &vertices[(*it)[1]][0], &vertices[(*it)[2]][0]);
    }
    else {
        // Non-indexed mesh: consecutive vertex triples form the triangles
        BOOST_ASSERT((vertices.size()%3)==0);
        for(size_t i = 0; i < vertices.size(); i += 3)
            ic->addTriangle(&vertices[i][0], &vertices[i+1][0], &vertices[i+2][0]);
    }

    ic->computeConvexDecomposition(skinWidth, decompositionDepth, maxHullVertices,
                                   concavityThresholdPercent, mergeThresholdPercent,
                                   volumeSplitThresholdPercent,
                                   useInitialIslandGeneration, useIslandGeneration,
                                   false);
    NxU32 hullCount = ic->getHullCount();
    boost::python::list hulls;
    CONVEX_DECOMPOSITION::ConvexHullResult result;
    for(NxU32 i = 0; i < hullCount; ++i) {
        ic->getConvexHullResult(i,result);

        // Copy hull vertices into an mVcount x 3 numpy array, matching precision
        npy_intp dims[] = { result.mVcount,3};
        PyObject *pyvertices = PyArray_SimpleNew(2,dims, sizeof(result.mVertices[0])==8 ? PyArray_DOUBLE : PyArray_FLOAT);
        std::copy(&result.mVertices[0],&result.mVertices[3*result.mVcount],(NxF32*)PyArray_DATA(pyvertices));

        // Copy hull triangle indices into an mTcount x 3 numpy array
        dims[0] = result.mTcount;
        dims[1] = 3;
        PyObject *pyindices = PyArray_SimpleNew(2,dims, PyArray_INT);
        std::copy(&result.mIndices[0],&result.mIndices[3*result.mTcount],(int*)PyArray_DATA(pyindices));

        hulls.append(boost::python::make_tuple(static_cast<numeric::array>(handle<>(pyvertices)),
                                               static_cast<numeric::array>(handle<>(pyindices))));
    }

    return hulls;
}
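
Both branches above depend on multi_array rows being laid out contiguously, so &vertices[i][0] yields a pointer to one packed (x, y, z) vertex. A self-contained sketch of the indexed branch, with add_triangle as a hypothetical stand-in for ic->addTriangle:

#include <boost/multi_array.hpp>
#include <cstdio>

// Hypothetical stand-in for ic->addTriangle: receives three vertex pointers
static void add_triangle(const float* a, const float* b, const float* c)
{
    std::printf("tri (%g,%g,%g) (%g,%g,%g) (%g,%g,%g)\n",
                a[0], a[1], a[2], b[0], b[1], b[2], c[0], c[1], c[2]);
}

int main()
{
    // Two triangles sharing an edge; vertices as N x 3, indices as M x 3
    boost::multi_array<float, 2> vertices(boost::extents[4][3]);
    boost::multi_array<int, 2> indices(boost::extents[2][3]);
    const float v[4][3] = {{0,0,0}, {1,0,0}, {0,1,0}, {1,1,0}};
    const int idx[2][3] = {{0,1,2}, {1,3,2}};
    vertices.assign(&v[0][0], &v[0][0] + 12);
    indices.assign(&idx[0][0], &idx[0][0] + 6);

    // Same row-pointer access pattern as the indexed branch above
    for (std::size_t t = 0; t < indices.size(); ++t)
        add_triangle(&vertices[indices[t][0]][0],
                     &vertices[indices[t][1]][0],
                     &vertices[indices[t][2]][0]);
    return 0;
}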
Code Example #4
//-----------------------------------------------------------------------------
void MeshPartitioning::build_mesh(Mesh& mesh,
              const std::vector<std::size_t>& global_cell_indices,
              const boost::multi_array<std::size_t, 2>& cell_global_vertices,
              const std::vector<std::size_t>& vertex_indices,
              const boost::multi_array<double, 2>& vertex_coordinates,
              const std::map<std::size_t, std::size_t>& vertex_global_to_local,
              std::size_t tdim, std::size_t gdim, std::size_t num_global_cells,
              std::size_t num_global_vertices)
{
  Timer timer("PARALLEL 3: Build mesh (from local mesh data)");

  // Get number of processes and process number
  const std::size_t num_processes = MPI::num_processes();
  const std::size_t process_number = MPI::process_number();

  // Open mesh for editing
  mesh.clear();
  MeshEditor editor;
  editor.open(mesh, tdim, gdim);

  // Add vertices
  editor.init_vertices(vertex_coordinates.size());
  Point point(gdim);
  dolfin_assert(vertex_indices.size() == vertex_coordinates.size());
  for (std::size_t i = 0; i < vertex_coordinates.size(); ++i)
  {
    for (std::size_t j = 0; j < gdim; ++j)
      point[j] = vertex_coordinates[i][j];
    editor.add_vertex_global(i, vertex_indices[i], point);
  }

  // Add cells
  editor.init_cells(cell_global_vertices.size());
  const std::size_t num_cell_vertices = tdim + 1;
  std::vector<std::size_t> cell(num_cell_vertices);
  for (std::size_t i = 0; i < cell_global_vertices.size(); ++i)
  {
    for (std::size_t j = 0; j < num_cell_vertices; ++j)
    {
      // Get local cell vertex
      std::map<std::size_t, std::size_t>::const_iterator iter
          = vertex_global_to_local.find(cell_global_vertices[i][j]);
      dolfin_assert(iter != vertex_global_to_local.end());
      cell[j] = iter->second;
    }
    editor.add_cell(i, global_cell_indices[i], cell);
  }

  // Close mesh: Note that this must be done after creating the global
  // vertex map, otherwise the ordering in mesh.close() will be wrong
  // (based on local numbers).
  editor.close();

  // Set global number of cells and vertices
  mesh.topology().init_global(0, num_global_vertices);
  mesh.topology().init_global(tdim, num_global_cells);

  // Construct boundary mesh
  BoundaryMesh bmesh(mesh, "exterior");

  const MeshFunction<std::size_t>& boundary_vertex_map = bmesh.entity_map(0);
  const std::size_t boundary_size = boundary_vertex_map.size();

  // Build sorted array of global boundary vertex indices (global
  // numbering)
  std::vector<std::size_t> global_vertex_send(boundary_size);
  for (std::size_t i = 0; i < boundary_size; ++i)
    global_vertex_send[i] = vertex_indices[boundary_vertex_map[i]];
  std::sort(global_vertex_send.begin(), global_vertex_send.end());

  // Receive buffer
  std::vector<std::size_t> global_vertex_recv;

  // Create shared_vertices data structure: mapping from shared vertices
  // to list of neighboring processes
  std::map<unsigned int, std::set<unsigned int> >& shared_vertices
        = mesh.topology().shared_entities(0);
  shared_vertices.clear();

  // FIXME: Remove computation from inside communication loop

  // Build shared vertex to sharing processes map
  for (std::size_t i = 1; i < num_processes; ++i)
  {
    // We send data to process p - i (i steps to the left)
    const int p = (process_number - i + num_processes) % num_processes;

    // We receive data from process p + i (i steps to the right)
    const int q = (process_number + i) % num_processes;

    // Send and receive
    MPI::send_recv(global_vertex_send, p, global_vertex_recv, q);

    // Compute intersection of global indices
    std::vector<std::size_t> intersection(std::min(global_vertex_send.size(),
                                                   global_vertex_recv.size()));
    std::vector<std::size_t>::iterator intersection_end
      = std::set_intersection(global_vertex_send.begin(),
                              global_vertex_send.end(),
                              global_vertex_recv.begin(),
                              global_vertex_recv.end(),
                              intersection.begin());

    // Fill shared vertices information
    std::vector<std::size_t>::const_iterator global_index;
    for (global_index = intersection.begin(); global_index != intersection_end;
         ++global_index)
    {
      // Get local index
      std::map<std::size_t, std::size_t>::const_iterator local_index;
      local_index = vertex_global_to_local.find(*global_index);
      dolfin_assert(local_index != vertex_global_to_local.end());

      // Insert (local index, [proc])
      shared_vertices[local_index->second].insert(q);
    }
  }
}
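
The neighbor loop above combines a ring-pattern send/receive with sorted-range intersection. Stripped of the MPI exchange, the intersection step reduces to the following sketch; shared_indices is an illustrative name, and both inputs must already be sorted, as global_vertex_send is above:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch: intersect two sorted global-index lists, as done per neighbor above
std::vector<std::size_t> shared_indices(const std::vector<std::size_t>& mine,
                                        const std::vector<std::size_t>& theirs)
{
  // The intersection can be no larger than the smaller input
  std::vector<std::size_t> intersection(std::min(mine.size(), theirs.size()));
  std::vector<std::size_t>::iterator intersection_end
    = std::set_intersection(mine.begin(), mine.end(),
                            theirs.begin(), theirs.end(),
                            intersection.begin());
  intersection.erase(intersection_end, intersection.end()); // trim unused tail
  return intersection;
}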