Example #1
#include <algorithm>
#include <cmath>
#include <iostream>
#include <boost/multi_array.hpp>

// 2D convolution of an 8-bit, single-channel image with a square kernel of odd
// size >= 3. Border pixels are handled by edge replication, and the result is
// clamped to [0, 255] before being written to output.
void Convolution(boost::multi_array<unsigned char,2> &input,
                       boost::multi_array<unsigned char,2> &output,
                       boost::multi_array<float,2>         &kernel){
	if (input.shape()[0]==output.shape()[0] and input.shape()[1]==output.shape()[1] 
		and kernel.shape()[0]== kernel.shape()[1] and kernel.shape()[0]%2==1
		and kernel.shape()[0]>=3 ){
		const int k0 = int(kernel.shape()[0]);          // kernel rows
		const int k1 = int(kernel.shape()[1]);          // kernel columns (== k0)
		const int k  = k0/2;                            // kernel radius
		const int row_size    = int(output.shape()[0]);
		const int column_size = int(output.shape()[1]);
		
		// Padded copy of the input: a[i][j] corresponds to input[i-k][j-k],
		// with out-of-range indices clamped to the nearest border pixel.
		boost::multi_array<float,2> a(boost::extents[2*k+row_size][2*k+column_size]);
		for(int i=0;i<2*k+row_size;i++){
			for(int j=0;j<2*k+column_size;j++){
				const int src_i = std::min(std::max(i-k,0),row_size-1);
				const int src_j = std::min(std::max(j-k,0),column_size-1);
				a[i][j] = float(input[src_i][src_j]);
			}
		}
		
		// Convolve: for every output pixel, accumulate the weighted
		// neighbourhood taken from the padded copy a.
		for(int j=k;j<k+column_size;j++){
			for(int i=k;i<k+row_size;i++){
				float p=0;
				for(int r=0;r<k0;r++){
					for(int s=0;s<k1;s++){
						p += a[i-k+r][j-k+s]*kernel[r][s];
					}
				}
				// Clamp to the valid 8-bit range before rounding.
				if(p<0){p=0;}
				if(p>255){p=255;}
				output[i-k][j-k] = static_cast<unsigned char>(lround(p));
			}
		}
    }
	else{
		std::cerr << "Convolution: invalid arguments - input and output must have the same shape,\n"
		          << "and the kernel must be square with an odd size of at least 3." << std::endl;
	}


}
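A minimal usage sketch for the fixed Convolution above; the image size and the 3x3 box-blur kernel are made up for illustration, and the includes added at the top of the example are reused:

int main()
{
    // Hypothetical 4x5 grey-scale image with a simple gradient.
    boost::multi_array<unsigned char,2> img(boost::extents[4][5]);
    boost::multi_array<unsigned char,2> out(boost::extents[4][5]);
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 5; ++j)
            img[i][j] = static_cast<unsigned char>(10*i + j);

    // 3x3 box-blur kernel: all weights equal, summing to 1.
    boost::multi_array<float,2> kernel(boost::extents[3][3]);
    std::fill(kernel.data(), kernel.data() + kernel.num_elements(), 1.0f/9.0f);

    Convolution(img, out, kernel);
    std::cout << int(out[2][2]) << std::endl;   // blurred centre pixel
    return 0;
}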
Example #2
 /// Copies the contents of the table into the LSS::Vector.
 void set( boost::multi_array<Real, 2>& data)
 {
   cf3_assert(m_is_created);
   cf3_assert(data.shape()[0]==m_blockrow_size);
   cf3_assert(data.shape()[1]==m_neq);
 }
Example #3
//-----------------------------------------------------------------------------
void MeshPartitioning::build_mesh(Mesh& mesh,
              const std::vector<std::size_t>& global_cell_indices,
              const boost::multi_array<std::size_t, 2>& cell_global_vertices,
              const std::vector<std::size_t>& vertex_indices,
              const boost::multi_array<double, 2>& vertex_coordinates,
              const std::map<std::size_t, std::size_t>& vertex_global_to_local,
              std::size_t tdim, std::size_t gdim, std::size_t num_global_cells,
              std::size_t num_global_vertices)
{
  Timer timer("PARALLEL 3: Build mesh (from local mesh data)");

  // Get number of processes and process number
  const std::size_t num_processes = MPI::num_processes();
  const std::size_t process_number = MPI::process_number();

  // Open mesh for editing
  mesh.clear();
  MeshEditor editor;
  editor.open(mesh, tdim, gdim);

  // Add vertices
  editor.init_vertices(vertex_coordinates.size());
  Point point(gdim);
  dolfin_assert(vertex_indices.size() == vertex_coordinates.size());
  for (std::size_t i = 0; i < vertex_coordinates.size(); ++i)
  {
    for (std::size_t j = 0; j < gdim; ++j)
      point[j] = vertex_coordinates[i][j];
    editor.add_vertex_global(i, vertex_indices[i], point);
  }

  // Add cells
  editor.init_cells(cell_global_vertices.size());
  const std::size_t num_cell_vertices = tdim + 1;
  std::vector<std::size_t> cell(num_cell_vertices);
  for (std::size_t i = 0; i < cell_global_vertices.size(); ++i)
  {
    for (std::size_t j = 0; j < num_cell_vertices; ++j)
    {
      // Get local cell vertex
      std::map<std::size_t, std::size_t>::const_iterator iter
          = vertex_global_to_local.find(cell_global_vertices[i][j]);
      dolfin_assert(iter != vertex_global_to_local.end());
      cell[j] = iter->second;
    }
    editor.add_cell(i, global_cell_indices[i], cell);
  }

  // Close mesh: Note that this must be done after creating the global
  // vertex map, otherwise the ordering in mesh.close() will be wrong
  // (based on local numbers).
  editor.close();

  // Set global number of cells and vertices
  mesh.topology().init_global(0, num_global_vertices);
  mesh.topology().init_global(tdim,  num_global_cells);

  // Construct boundary mesh
  BoundaryMesh bmesh(mesh, "exterior");

  const MeshFunction<std::size_t>& boundary_vertex_map = bmesh.entity_map(0);
  const std::size_t boundary_size = boundary_vertex_map.size();

  // Build sorted array of global boundary vertex indices (global
  // numbering)
  std::vector<std::size_t> global_vertex_send(boundary_size);
  for (std::size_t i = 0; i < boundary_size; ++i)
    global_vertex_send[i] = vertex_indices[boundary_vertex_map[i]];
  std::sort(global_vertex_send.begin(), global_vertex_send.end());

  // Receive buffer
  std::vector<std::size_t> global_vertex_recv;

  // Create shared_vertices data structure: mapping from shared vertices
  // to list of neighboring processes
  std::map<unsigned int, std::set<unsigned int> >& shared_vertices
        = mesh.topology().shared_entities(0);
  shared_vertices.clear();

  // FIXME: Remove computation from inside communication loop

  // Build shared vertex to sharing processes map
  for (std::size_t i = 1; i < num_processes; ++i)
  {
    // We send data to process p = process_number - i (i steps to the left)
    const int p = (process_number - i + num_processes) % num_processes;

    // We receive data from process q = process_number + i (i steps to the right)
    const int q = (process_number + i) % num_processes;

    // Send and receive
    MPI::send_recv(global_vertex_send, p, global_vertex_recv, q);

    // Compute intersection of global indices
    std::vector<std::size_t> intersection(std::min(global_vertex_send.size(),
                                                   global_vertex_recv.size()));
    std::vector<std::size_t>::iterator intersection_end
      = std::set_intersection(global_vertex_send.begin(),
                              global_vertex_send.end(),
                              global_vertex_recv.begin(),
                              global_vertex_recv.end(),
                              intersection.begin());

    // Fill shared vertices information
    std::vector<std::size_t>::const_iterator global_index;
    for (global_index = intersection.begin(); global_index != intersection_end;
         ++global_index)
    {
      // Get local index
      std::map<std::size_t, std::size_t>::const_iterator local_index;
      local_index = vertex_global_to_local.find(*global_index);
      dolfin_assert(local_index != vertex_global_to_local.end());

      // Insert (local index, [proc])
      shared_vertices[local_index->second].insert(q);
    }
  }
}
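The shared-vertex detection above combines a ring exchange with an intersection of two sorted lists of global indices; the intersection step on its own looks like the following standalone sketch (the indices are made up, and this is plain STL, not DOLFIN API):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Sorted global vertex indices owned locally and received from a neighbour.
    std::vector<std::size_t> mine  = {2, 5, 7, 11, 42};
    std::vector<std::size_t> other = {3, 5, 11, 40};

    std::vector<std::size_t> shared(std::min(mine.size(), other.size()));
    auto shared_end = std::set_intersection(mine.begin(), mine.end(),
                                            other.begin(), other.end(),
                                            shared.begin());
    shared.erase(shared_end, shared.end());

    for (std::size_t v : shared)
        std::cout << "vertex " << v << " is shared with the neighbour\n";  // 5 and 11
    return 0;
}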
Example #4
 template <typename T, std::size_t N, typename Allocator>
 void save(output_archive& ar, const boost::multi_array<T, N,
     Allocator>& marray, unsigned)
 {
     ar & make_array(marray.shape(), marray.num_dimensions());
     ar & make_array(marray.data(), marray.num_elements());
 }
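A possible deserialisation counterpart, shown only as a sketch: it assumes the same make_array helper and an input_archive type from the same serialisation framework, and uses boost::array as the extent list passed to resize():

 template <typename T, std::size_t N, typename Allocator>
 void load(input_archive& ar, boost::multi_array<T, N, Allocator>& marray,
     unsigned)
 {
     // Read the N extents written by save(), reshape, then read the elements.
     boost::array<std::size_t, N> shape;
     ar & make_array(shape.data(), N);
     marray.resize(shape);
     ar & make_array(marray.data(), marray.num_elements());
 }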
Example #5
//-----------------------------------------------------------------------------
void  MeshPartitioning::distribute_cells(const LocalMeshData& mesh_data,
                            const std::vector<std::size_t>& cell_partition,
                            std::vector<std::size_t>& global_cell_indices,
                            boost::multi_array<std::size_t, 2>& cell_vertices)
{
  // This function takes the partition computed by the partitioner (which
  // tells us to which process each of the local cells stored in
  // LocalMeshData on this process belongs) and uses MPI::all_to_all to
  // redistribute all cells (the global vertex indices of all cells).

  // Number of MPI processes
  const std::size_t num_processes = MPI::num_processes();

  // Get dimensions of local mesh_data
  const std::size_t num_local_cells = mesh_data.cell_vertices.size();
  dolfin_assert(mesh_data.global_cell_indices.size() == num_local_cells);
  const std::size_t num_cell_vertices = mesh_data.num_vertices_per_cell;
  if (!mesh_data.cell_vertices.empty())
  {
    if (mesh_data.cell_vertices[0].size() != num_cell_vertices)
    {
      dolfin_error("MeshPartitioning.cpp",
                   "distribute cells",
                   "Mismatch in number of cell vertices (%d != %d) on process %d",
                   mesh_data.cell_vertices[0].size(), num_cell_vertices,
                   MPI::process_number());
    }
  }

  // Build array of cell-vertex connectivity and partition vector
  // Distribute the global cell number as well
  std::vector<std::vector<std::size_t> > send_cell_vertices(num_processes);
  for (std::size_t i = 0; i < num_local_cells; i++)
  {
    const std::size_t dest = cell_partition[i];
    send_cell_vertices[dest].push_back(mesh_data.global_cell_indices[i]);
    for (std::size_t j = 0; j < num_cell_vertices; j++)
      send_cell_vertices[dest].push_back(mesh_data.cell_vertices[i][j]);
  }

  // Distribute cell-vertex connectivity
  std::vector<std::vector<std::size_t> > received_cell_vertices(num_processes);
  MPI::all_to_all(send_cell_vertices, received_cell_vertices);

  // Count number of received cells
  std::size_t num_new_local_cells = 0;
  for (std::size_t p = 0; p < received_cell_vertices.size(); ++p)
  {
    num_new_local_cells
      += received_cell_vertices[p].size()/(num_cell_vertices + 1);
  }

  // Put mesh_data back into mesh_data.cell_vertices
  cell_vertices.resize(boost::extents[num_new_local_cells][num_cell_vertices]);
  global_cell_indices.resize(num_new_local_cells);

  // Loop over new cells
  std::size_t c = 0;
  for (std::size_t p = 0; p < num_processes; ++p)
  {
    for (std::size_t i = 0; i < received_cell_vertices[p].size();
         i += (num_cell_vertices + 1))
    {
      global_cell_indices[c] = received_cell_vertices[p][i];
      for (std::size_t j = 0; j < num_cell_vertices; ++j)
        cell_vertices[c][j] = received_cell_vertices[p][i + 1 + j];

      ++c;
    }
  }
}
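The packed layout used above stores each cell as its global index followed by its vertex indices, so the receiving side steps through the flat buffer with a stride of num_cell_vertices + 1. A standalone illustration with made-up values:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    const std::size_t num_cell_vertices = 3;  // e.g. triangles

    // Two packed cells: [global index, v0, v1, v2] each.
    std::vector<std::size_t> buffer = {17, 4, 9, 2,
                                       23, 9, 2, 7};

    for (std::size_t i = 0; i < buffer.size(); i += num_cell_vertices + 1)
    {
        std::cout << "cell " << buffer[i] << " has vertices";
        for (std::size_t j = 0; j < num_cell_vertices; ++j)
            std::cout << " " << buffer[i + 1 + j];
        std::cout << "\n";
    }
    return 0;
}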
Example #6
//-----------------------------------------------------------------------------
void MeshPartitioning::distribute_vertices(const LocalMeshData& mesh_data,
                    const boost::multi_array<std::size_t, 2>& cell_vertices,
                    std::vector<std::size_t>& vertex_indices,
                    std::map<std::size_t, std::size_t>& vertex_global_to_local,
                    boost::multi_array<double, 2>& vertex_coordinates)
{
  // This function distributes all vertices (coordinates and
  // local-to-global mapping) according to the cells that are stored on
  // each process. This happens in several stages: First each process
  // figures out which vertices it needs (by looking at its cells)
  // and where those vertices are located. That information is then
  // distributed so that each process learns where it needs to send
  // its vertices.

  // Get number of processes
  const std::size_t num_processes = MPI::num_processes();

  // Get geometric dimension
  const std::size_t gdim = mesh_data.gdim;

  // Compute which vertices we need
  std::set<std::size_t> needed_vertex_indices;
  boost::multi_array<std::size_t, 2>::const_iterator vertices;
  for (vertices = cell_vertices.begin(); vertices != cell_vertices.end();
       ++vertices)
  {
    needed_vertex_indices.insert(vertices->begin(), vertices->end());
  }

  // Compute where (process number) the vertices we need are located
  std::vector<std::vector<std::size_t> > send_vertex_indices(num_processes);
  std::vector<std::vector<std::size_t> > vertex_location(num_processes);
  std::set<std::size_t>::const_iterator required_vertex;
  for (required_vertex = needed_vertex_indices.begin();
       required_vertex != needed_vertex_indices.end(); ++required_vertex)
  {
    // Get process that has required vertex
    const std::size_t location
      = MPI::index_owner(*required_vertex, mesh_data.num_global_vertices);
    send_vertex_indices[location].push_back(*required_vertex);
    vertex_location[location].push_back(*required_vertex);
  }

  // Send required vertices to other processes, and receive back vertices
  // required by other processes.
  std::vector<std::vector<std::size_t> > received_vertex_indices;
  MPI::all_to_all(send_vertex_indices, received_vertex_indices);

  // Distribute vertex coordinates
  std::vector<std::vector<double> > send_vertex_coordinates(num_processes);
  const std::pair<std::size_t, std::size_t> local_vertex_range
    = MPI::local_range(mesh_data.num_global_vertices);
  for (std::size_t p = 0; p < num_processes; ++p)
  {
    send_vertex_coordinates[p].reserve(received_vertex_indices[p].size()*gdim);
    for (std::size_t i = 0; i < received_vertex_indices[p].size(); ++i)
    {
      dolfin_assert(received_vertex_indices[p][i] >= local_vertex_range.first
                 && received_vertex_indices[p][i] < local_vertex_range.second);
      const std::size_t location
        = received_vertex_indices[p][i] - local_vertex_range.first;
      for (std::size_t j = 0; j < gdim; ++j)
        send_vertex_coordinates[p].push_back(mesh_data.vertex_coordinates[location][j]);
    }
  }
  std::vector<std::vector<double> > received_vertex_coordinates;
  MPI::all_to_all(send_vertex_coordinates, received_vertex_coordinates);

  // Set index counters to first position in receive buffers
  std::vector<std::size_t> index_counters(num_processes, 0);

  // Clear data
  vertex_indices.clear();
  vertex_global_to_local.clear();

  // Count number of local vertices
  std::size_t num_local_vertices = 0;
  for (std::size_t p = 0; p < num_processes; ++p)
    num_local_vertices += received_vertex_coordinates[p].size()/gdim;

  // Store coordinates and construct global to local mapping
  vertex_coordinates.resize(boost::extents[num_local_vertices][gdim]);
  vertex_indices.resize(num_local_vertices);
  std::size_t v = 0;
  for (std::size_t p = 0; p < num_processes; ++p)
  {
    for (std::size_t i = 0; i < received_vertex_coordinates[p].size();
         i += gdim)
    {
      for (std::size_t j = 0; j < gdim; ++j)
        vertex_coordinates[v][j] = received_vertex_coordinates[p][i + j];

      const std::size_t global_vertex_index
        = vertex_location[p][index_counters[p]++];
      vertex_global_to_local[global_vertex_index] = v;
      vertex_indices[v] = global_vertex_index;

      ++v;
    }
  }
}
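Vertex ownership above is decided by a block distribution (MPI::index_owner / MPI::local_range). A sketch of that arithmetic under the usual blocking assumption; the exact DOLFIN formula may differ:

#include <cstddef>
#include <iostream>

// Block distribution: the first 'rem' ranks own one extra index each.
std::size_t index_owner_sketch(std::size_t global_index, std::size_t n_global,
                               std::size_t n_procs)
{
    const std::size_t chunk = n_global / n_procs;   // minimum block size
    const std::size_t rem   = n_global % n_procs;   // ranks 0..rem-1 get chunk+1
    if (global_index < rem*(chunk + 1))
        return global_index/(chunk + 1);
    return rem + (global_index - rem*(chunk + 1))/chunk;
}

int main()
{
    // 10 global vertices over 3 processes: blocks {0..3}, {4..6}, {7..9}.
    for (std::size_t v = 0; v < 10; ++v)
        std::cout << "vertex " << v << " owned by rank "
                  << index_owner_sketch(v, 10, 3) << "\n";
    return 0;
}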
Example #7
template <int N>  // N: number of vertices per facet (template parameter implied by the uses of N below)
std::int32_t GraphBuilder::compute_local_dual_graph_keyed(
  const MPI_Comm mpi_comm,
  const boost::multi_array<std::int64_t, 2>& cell_vertices,
  const CellType& cell_type,
  std::vector<std::vector<std::size_t>>& local_graph,
  FacetCellMap& facet_cell_map)
{
  Timer timer("Compute local part of mesh dual graph");

  const std::int8_t tdim = cell_type.dim();
  const std::int32_t num_local_cells = cell_vertices.shape()[0];
  const std::int8_t num_vertices_per_cell = cell_type.num_entities(0);
  const std::int8_t num_facets_per_cell = cell_type.num_entities(tdim - 1);
  const std::int8_t num_vertices_per_facet = cell_type.num_vertices(tdim - 1);

  dolfin_assert(N == num_vertices_per_facet);
  dolfin_assert(num_local_cells == (int) cell_vertices.shape()[0]);
  dolfin_assert(num_vertices_per_cell == (int) cell_vertices.shape()[1]);

  local_graph.resize(num_local_cells);
  facet_cell_map.clear();

  // Compute local edges (cell-cell connections) using global
  // (internal to this function, not the user numbering) numbering

  // Get offset for this process
  const std::int64_t cell_offset = MPI::global_offset(mpi_comm, num_local_cells,
                                                      true);

  // Create map from cell vertices to entity vertices
  boost::multi_array<unsigned int, 2>
    facet_vertices(boost::extents[num_facets_per_cell][num_vertices_per_facet]);
  std::vector<unsigned int> v(num_vertices_per_cell);
  std::iota(v.begin(), v.end(), 0);
  cell_type.create_entities(facet_vertices, tdim - 1, v.data());

  // Vector-of-arrays data structure, which is considerably faster than
  // vector-of-vectors.
  std::vector<std::pair<std::array<std::int32_t, N>, std::int32_t>>
    facets(num_facets_per_cell*num_local_cells);

  // Iterate over all cells and build list of all facets (keyed on
  // sorted vertex indices), with cell index attached
  int counter = 0;
  for (std::int32_t i = 0; i < num_local_cells; ++i)
  {
    // Iterate over facets of cell
    for (std::int8_t j = 0; j < num_facets_per_cell; ++j)
    {
      // Get list of facet vertices
      auto& facet = facets[counter].first;
      for (std::int8_t k = 0; k < N; ++k)
        facet[k] = cell_vertices[i][facet_vertices[j][k]];

      // Sort facet vertices
      std::sort(facet.begin(), facet.end());

      // Attach local cell index
      facets[counter].second = i;

      // Increment facet counter
      counter++;
    }
  }

  // Sort facets
  std::sort(facets.begin(), facets.end());

  // Find matching facets by comparing facet i and facet i-1
  std::size_t num_local_edges = 0;
  for (std::size_t i = 1; i < facets.size(); ++i)
  {
    const int ii = i;
    const int jj = i - 1;

    const auto& facet0 = facets[jj].first;
    const auto& facet1 = facets[ii].first;
    const int cell_index0 = facets[jj].second;
    if (std::equal(facet1.begin(), facet1.end(), facet0.begin()))
    {
      // Add edges (directed graph, so add both ways)
      const int cell_index1 = facets[ii].second;
      local_graph[cell_index0].push_back(cell_index1 + cell_offset);
      local_graph[cell_index1].push_back(cell_index0 + cell_offset);

      // Since we've just found a matching pair, the next pair cannot be
      // matching, so advance 1
      ++i;

      // Increment number of local edges found
      ++num_local_edges;
    }
    else
    {
      // No match, so add facet0 to map
      //facet_cell_map.insert(facet_cell_map.end(), {std::vector<std::size_t>(facet0.begin(),
      //    facet0.end()), cell_index0});
      facet_cell_map.push_back({std::vector<std::size_t>(facet0.begin(),
          facet0.end()), cell_index0});
    }
  }

  // Add last facet, as it's not covered by the above loop. We could
  // check it against the preceding facet, but it's easier to just
  // insert it here
  if (!facets.empty())
  {
    const int k = facets.size() - 1;
    const int cell_index = facets[k].second;
    facet_cell_map.push_back({std::vector<std::size_t>(facets[k].first.begin(),
        facets[k].first.end()), cell_index});
  }

  return num_local_edges;
}
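The local facet matching above relies on a generic pattern: build (sorted vertex key, cell index) pairs, sort them, and compare neighbouring entries to find facets shared by two cells. A standalone version with made-up triangle data:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
    // (sorted facet vertices, owning cell index)
    std::vector<std::pair<std::array<std::int32_t, 2>, std::int32_t>> facets = {
        {{1, 4}, 0}, {{4, 7}, 0}, {{1, 7}, 0},
        {{4, 7}, 1}, {{4, 9}, 1}, {{7, 9}, 1}};

    std::sort(facets.begin(), facets.end());

    // Equal neighbours are the same facet seen from two cells -> a dual-graph edge.
    for (std::size_t i = 1; i < facets.size(); ++i)
        if (facets[i].first == facets[i - 1].first)
            std::cout << "cells " << facets[i - 1].second << " and "
                      << facets[i].second << " share a facet\n";
    return 0;
}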
Example #8
//-----------------------------------------------------------------------------
std::int32_t GraphBuilder::compute_nonlocal_dual_graph(
  const MPI_Comm mpi_comm,
  const boost::multi_array<std::int64_t, 2>& cell_vertices,
  const CellType& cell_type,
  const std::int64_t num_global_vertices,
  std::vector<std::vector<std::size_t>>& local_graph,
  FacetCellMap& facet_cell_map,
  std::set<std::int64_t>& ghost_vertices)
{
  log(PROGRESS, "Build nonlocal part of mesh dual graph");
  Timer timer("Compute non-local part of mesh dual graph");

  // Get number of MPI processes, and return if mesh is not distributed
  const int num_processes = MPI::size(mpi_comm);
  if (num_processes == 1)
    return 0;

  // At this stage facet_cell_map only contains facets with a single attached
  // cell, i.e. facets that lie on an interprocess or external boundary

  const int tdim = cell_type.dim();

  // List of cell vertices
  const std::int32_t num_local_cells = cell_vertices.shape()[0];
  const std::int8_t num_vertices_per_cell = cell_type.num_entities(0);
  const std::int8_t num_vertices_per_facet = cell_type.num_vertices(tdim - 1);

  dolfin_assert(num_local_cells == (int) cell_vertices.shape()[0]);
  dolfin_assert(num_vertices_per_cell == (int) cell_vertices.shape()[1]);

  // Compute local edges (cell-cell connections) using global
  // (internal to this function, not the user numbering) numbering

  // Get offset for this process
  const std::int64_t offset = MPI::global_offset(mpi_comm, num_local_cells,
                                                 true);

  // Send facet-cell map to intermediary match-making processes
  std::vector<std::vector<std::size_t>> send_buffer(num_processes);
  std::vector<std::vector<std::size_t>> received_buffer(num_processes);

  // Pack map data and send to match-maker process
  for (auto &it : facet_cell_map)
  {
    // FIXME: Could use a better index? First vertex is slightly
    //        skewed towards low values - may not be important

    // Use first vertex of facet to partition into blocks
    const int dest_proc = MPI::index_owner(mpi_comm, (it.first)[0],
                                           num_global_vertices);

    // Pack map into vectors to send
    send_buffer[dest_proc].insert(send_buffer[dest_proc].end(),
                                  it.first.begin(), it.first.end());

    // Add offset to cell numbers sent off process
    send_buffer[dest_proc].push_back(it.second + offset);
  }

  // FIXME: This does not look memory scalable. Switch to 'post-office' model.
  // Send data
  MPI::all_to_all(mpi_comm, send_buffer, received_buffer);

  // Clear send buffer
  send_buffer = std::vector<std::vector<std::size_t>>(num_processes);

  // Map to connect processes and cells, using facet as key
  typedef boost::unordered_map<std::vector<std::size_t>,
              std::pair<std::size_t, std::size_t>> MatchMap;
  MatchMap matchmap;

  // Look for matches to send back to other processes
  std::pair<std::vector<std::size_t>,
            std::pair<std::size_t, std::size_t>> key;
  key.first.resize(num_vertices_per_facet);
  for (int p = 0; p < num_processes; ++p)
  {
    // Unpack into map
    const std::vector<std::size_t>& data_p = received_buffer[p];
    for (auto it = data_p.begin(); it != data_p.end();
         it += (num_vertices_per_facet + 1))
    {
      // Build map key
      std::copy(it, it + num_vertices_per_facet, key.first.begin());
      key.second.first = p;
      key.second.second = *(it + num_vertices_per_facet);

      // Perform map insertion/look-up
      std::pair<MatchMap::iterator, bool> data = matchmap.insert(key);

      // If data is already in the map, extract data and remove from
      // map
      if (!data.second)
      {
        // Found a match of two facets - send back to owners
        const std::size_t proc1 = data.first->second.first;
        const std::size_t proc2 = p;
        const std::size_t cell1 = data.first->second.second;
        const std::size_t cell2 = key.second.second;
        send_buffer[proc1].push_back(cell1);
        send_buffer[proc1].push_back(cell2);
        send_buffer[proc2].push_back(cell2);
        send_buffer[proc2].push_back(cell1);

        // Remove facet - saves memory and search time
        matchmap.erase(data.first);
      }
    }
  }

  // Send matches to other processes
  MPI::all_to_all(mpi_comm, send_buffer, received_buffer);

  // Clear ghost vertices
  ghost_vertices.clear();

  // Flatten received data and insert connected cells into local map
  std::int32_t num_nonlocal_edges = 0;
  for (std::size_t p = 0; p < received_buffer.size(); ++p)
  {
    const std::vector<std::size_t>& cell_list = received_buffer[p];
    for (std::size_t i = 0; i < cell_list.size(); i += 2)
    {
      dolfin_assert((std::int64_t) cell_list[i] >= offset);
      dolfin_assert((std::int64_t)  (cell_list[i] - offset)
                    < (std::int64_t) local_graph.size());

      //local_graph[cell_list[i] - offset].insert(cell_list[i + 1]);
      auto& edges = local_graph[cell_list[i] - offset];
      auto it = std::find(edges.begin(), edges.end(), cell_list[i + 1]);
      if (it == local_graph[cell_list[i] - offset].end())
        edges.push_back(cell_list[i + 1]);

      ghost_vertices.insert(cell_list[i + 1]);
    }

    ++num_nonlocal_edges;
  }

  return num_nonlocal_edges;
}
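The intermediary "match-making" step pairs up facets that arrive from two different processes via a hash map keyed on the facet. A reduced sketch where the facet key is a single integer and all data are made up:

#include <array>
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>

int main()
{
    std::unordered_map<std::size_t, std::pair<std::size_t, std::size_t>> matchmap;

    // (facet key, sending process, cell number with global offset)
    const std::vector<std::array<std::size_t, 3>> incoming = {
        {101, 0, 12}, {205, 1, 40}, {101, 1, 41}};

    for (const auto& f : incoming)
    {
        auto inserted = matchmap.insert({f[0], {f[1], f[2]}});
        if (!inserted.second)
        {
            // Second occurrence: the facet is shared between two processes.
            std::cout << "facet " << f[0] << ": cell " << inserted.first->second.second
                      << " (rank " << inserted.first->second.first << ") <-> cell "
                      << f[2] << " (rank " << f[1] << ")\n";
            matchmap.erase(inserted.first);
        }
    }
    return 0;
}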
Example #9
/** \brief get the centroid of the neighbourhood of an image pixel given by its offset */
valarray<double> centroid::operator()(const size_t& l) const
{
    const int scope = 1;
    //convert the raveled index to 3D indices
	size_t
		i = l / image.strides()[0],
		j = (l % image.strides()[0]) / image.strides()[1],
		k = (l % image.strides()[0]) % image.strides()[1];
    //cout<<"l="<<l<<" -> i="<<i<<" j="<<j<<" k="<<k<<" ... ";

	//the data of the neighbourhood view are copied together for the coder's sanity
	boost::multi_array<float,3> ngb =
		image[boost::indices
				[image.shape()[0]<2*scope+1 ? range() : range(i-scope, i+scope+1)]
				[image.shape()[1]<2*scope+1 ? range() : range(j-scope, j+scope+1)]
				[image.shape()[2]<2*scope+1 ? range() : range(k-scope, k+scope+1)]
			];
    //Find the extrema of the neighbourhood.
    std::pair<float*, float*> minmax = boost::minmax_element(ngb.origin(), ngb.origin()+ngb.num_elements());
    //If the neighbourhood contains a negative pixel, we are at the edge of a Fourier filtering artefact that should not be considered a particle
    if(*minmax.first < 0)
        return valarray<double>(-1.0, 3);
	//marking non local maxima (including diagonals)
	if(image.origin()[l] != *minmax.second)
        return valarray<double>(-1.0, 3);

	//calculation of the intensity centroid
	valarray<double> c(0.0,3);
	double total_w = 0.0;
	float *px = ngb.origin();
	for(int x=0; x<ngb.shape()[0];++x)
        for(int y=0; y<ngb.shape()[1];++y)
            for(int z=0; z<ngb.shape()[2];++z)
            {
                const double weight = pow((double)(x-scope), 2) + pow((double)(y-scope), 2) + pow((double)(z-scope), 2) * (double)(*px);
                c[0] += (x-scope)*weight;
                c[1] += (y-scope)*weight;
                c[2] += (z-scope)*weight;
                total_w += weight ;
                px++;
            }
    //cout<<c[0]<<"\t"<<c[1]<<"\t"<<c[2]<<endl;
    //cout<<"divide by a weight of "<<total_w<<endl;
    c /= total_w/pow(2.0*scope+1, 2);
    //cout<<c[0]<<"\t"<<c[1]<<"\t"<<c[2]<<endl;
    //c /= (double)accumulate(ngb.origin(), ngb.origin()+ngb.num_elements(), 0.0);

	//double sum = accumulate(ngb.origin(),ngb.origin()+ngb.num_elements(),0.0);
	//cout<<"valarrays ... ";
	/*valarray<double> c(0.0,3), pos(0.0,3), middle(0.0,3);
	for(size_t d=0; d<3;++d)
		middle[d] = ngb.shape()[d]/3;
	float *v = ngb.origin();
	for(pos[0]=0;pos[0]<ngb.shape()[0];++pos[0])
		for(pos[1]=0;pos[1]<ngb.shape()[1];++pos[1])
			for(pos[2]=0;pos[2]<ngb.shape()[2];++pos[2])
				c += (pos-middle) * (*v++);//pow(*v++, 2.0f);
	c /= image.origin()[l];//pow(image.origin()[l], 2.0f);

	for(size_t d=0;d<3;++d)
        c[d] = (c[d]<0?-1:1) * sqrt(abs(c[d]))/4.5;*/
	c[0] += i;
	c[1] += j;
	c[2] += k;
	return c;
}
Example #10
boost::multi_array<double, DIMENSION>
get_imag_parts(const boost::multi_array<SCALAR, DIMENSION> &data) {
  boost::multi_array<double, DIMENSION> imag_part(data.shape());
  std::transform(data.begin(), data.end(), imag_part.begin(), get_imag);
  return imag_part;
}
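For reference, a dimension-agnostic way to apply such an element-wise function is to walk the flat storage of the multi_array. A sketch with std::complex standing in for SCALAR and a lambda standing in for get_imag:

#include <algorithm>
#include <complex>
#include <iostream>
#include <boost/multi_array.hpp>

int main()
{
    boost::multi_array<std::complex<double>, 2> data(boost::extents[2][3]);
    data[1][2] = {4.0, -7.5};

    boost::multi_array<double, 2> imag_part(boost::extents[2][3]);
    std::transform(data.data(), data.data() + data.num_elements(),
                   imag_part.data(),
                   [](const std::complex<double>& z) { return z.imag(); });

    std::cout << imag_part[1][2] << "\n";  // -7.5
    return 0;
}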
Example #11
void Octtree::find_cell_ranks( const boost::multi_array<Real,2>& coordinates, std::vector<Uint>& ranks )
{
  ranks.resize(coordinates.size());

  Handle< Elements > element_component;
  Uint element_idx;
  std::deque<Uint> missing_cells;

  RealVector dummy(m_dim);

  for(Uint i=0; i<coordinates.size(); ++i)
  {
    for (Uint d=0; d<m_dim; ++d)
      dummy[d] = coordinates[i][d];
    if( find_element(dummy,element_component,element_idx) )
    {
      ranks[i] = Comm::instance().rank();
    }
    else
    {
      ranks[i] = math::Consts::uint_max();
      missing_cells.push_back(i);
    }
  }

  std::vector<Real> send_coords(m_dim*missing_cells.size());
  std::vector<Real> recv_coords;

  Uint c(0);
  boost_foreach(const Uint i, missing_cells)
  {
    for(Uint d=0; d<m_dim; ++d)
      send_coords[c++]=coordinates[i][d];
  }

  for (Uint root=0; root<PE::Comm::instance().size(); ++root)
  {

    recv_coords.resize(0);
    PE::Comm::instance().broadcast(send_coords,recv_coords,root,m_dim);

    // The initial size only matters for root == rank, where the vector is not
    // resized below
    std::vector<Uint> send_found(missing_cells.size(),math::Consts::uint_max());

    if (root!=Comm::instance().rank())
    {
      std::vector<RealVector> recv_coordinates(recv_coords.size()/m_dim) ;
      boost_foreach(RealVector& realvec, recv_coordinates)
          realvec.resize(m_dim);

      c=0;
      for (Uint i=0; i<recv_coordinates.size(); ++i)
      {
        for(Uint d=0; d<m_dim; ++d)
          recv_coordinates[i][d]=recv_coords[c++];
      }

      send_found.resize(recv_coordinates.size());
      for (Uint i=0; i<recv_coordinates.size(); ++i)
      {
        if( find_element(recv_coordinates[i],element_component,element_idx) )
        {
          send_found[i] = Comm::instance().rank();
        }
        else
          send_found[i] = math::Consts::uint_max();
      }
    }

    std::vector<Uint> recv_found(missing_cells.size()*Comm::instance().size());
    PE::Comm::instance().gather(send_found,recv_found,root);

    if( root==Comm::instance().rank())
    {
      const Uint stride = missing_cells.size();
      for (Uint i=0; i<missing_cells.size(); ++i)
      {
        for(Uint p=0; p<Comm::instance().size(); ++p)
        {
          ranks[missing_cells[i]] = std::min(recv_found[i+p*stride] , ranks[missing_cells[i]]);
        }
      }
    }
  }
}
Example #12
template <typename T, std::size_t N>
size_t cols(const boost::multi_array<T, N>& arr)
{
    return arr.shape()[1];
}
Example #13
template <typename T, std::size_t N>
size_t rows(const boost::multi_array<T, N>& arr)
{
    return arr.shape()[0];
}
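A quick usage sketch of the two helpers above (with the template headers added):

#include <iostream>
#include <boost/multi_array.hpp>

int main()
{
    boost::multi_array<double, 2> m(boost::extents[3][4]);
    std::cout << rows(m) << " x " << cols(m) << "\n";  // prints "3 x 4"
    return 0;
}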
Example #14
  /** Returns the total number of elements in the buffer

      Equal to get_range()[0] * ... * get_range()[dimensions-1].
  */
  auto get_count() const {
    return allocation.num_elements();
  }
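boost::multi_array reports the same count through num_elements(); a tiny check of the product rule that needs nothing from the surrounding buffer/get_range API:

#include <cassert>
#include <boost/multi_array.hpp>

int main()
{
    boost::multi_array<int, 3> allocation(boost::extents[4][3][2]);
    // num_elements() is the product of the extents: 4 * 3 * 2 == 24.
    assert(allocation.num_elements() == 4u * 3u * 2u);
    return 0;
}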
Example #15
// writes a 2d array containing rgb information into a png file
bool write_png(const boost::multi_array<png::rgb,2>& pixel,
               const char* filename)
{
    FILE *fp = fopen(filename, "wb");
    if(fp==NULL) {
        printf("write_png: error opening file %s\n", filename);
        return false;
    }

    // we need to copy the data into an array
    png_byte	**Array;
    int	height, width;
    height=pixel.shape()[0];
    width=pixel.shape()[1];

    Array = new png_byte*[height];
    for(int i=0; i<height; i++) {
        Array[i] = new png_byte[3*width];
        for(int j=0; j<width; j++) {
            assert( pixel[i][j].r<=255 );
            assert( pixel[i][j].g<=255 );
            assert( pixel[i][j].b<=255 );
            Array[i][3*j]=(png_byte) pixel[i][j].r;
            Array[i][3*j+1]=(png_byte) pixel[i][j].g;
            Array[i][3*j+2]=(png_byte) pixel[i][j].b;
        }
    }


    // initialise png_struct and png_info
    png_structp	png_ptr;
    png_infop	info_ptr;

    png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING,
                                      NULL,writepng_error_handler,NULL);
    if(!png_ptr) {
        printf("write_png: error initialising png_ptr\n");
        for(int i=0; i<height; i++) delete[] Array[i];
        delete[] Array;
        fclose(fp);
        return(false);
    }

    info_ptr = png_create_info_struct(png_ptr);
    if(!info_ptr) {
        printf("write_png: error creating info_ptr\n");
        png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
        for(int i=0; i<height; i++) delete[] Array[i];
        delete[] Array;
        fclose(fp);
        return(false);
    }

    // set error handling via setjmp/longjmp (libpng convention)
    if(setjmp(png_jmpbuf(png_ptr))) {
        printf("write_png: general error\n");
        png_destroy_write_struct(&png_ptr, &info_ptr);
        for(int i=0; i<height; i++) delete[] Array[i];
        delete[] Array;
        fclose(fp);
        return(false);
    }

    // making sure fp is opened in binary mode
    png_init_io(png_ptr, fp);

    // set image parameters
    png_set_compression_level(png_ptr, Z_BEST_COMPRESSION);
    png_set_IHDR(png_ptr, info_ptr, width, height,
                 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);

    // writing the header
    png_write_info(png_ptr, info_ptr);

    // writing the actual data, provided by pointers to rows
    png_write_image(png_ptr, Array);

    // finishing up
    png_write_end(png_ptr, info_ptr);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    for(int i=0; i<height; i++) {
        delete[] Array[i];
    }
    delete[] Array;
    fclose(fp);

    return true;

}
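A usage sketch, assuming png::rgb is an aggregate with the r, g, b members used above and that libpng and the relevant headers are available:

int main()
{
    // 64x128 horizontal gradient, red channel only.
    boost::multi_array<png::rgb, 2> pixel(boost::extents[64][128]);
    for (int i = 0; i < 64; ++i)
        for (int j = 0; j < 128; ++j) {
            pixel[i][j].r = 2 * j;
            pixel[i][j].g = 0;
            pixel[i][j].b = 0;
        }

    return write_png(pixel, "gradient.png") ? 0 : 1;
}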