Example #1
File: Smooth.cpp  Project: seenen/VR
void smooth::laplace_smooth(const float scale)
{
	vector<vertex_3> displacements(vertices.size(), vertex_3(0, 0, 0));

	// Get per-vertex displacement.
	for (size_t i = 0; i < vertices.size(); i++)
	{
		// Skip rogue vertices (which were probably made rogue during a previous
		// attempt to fix mesh cracks).
		if (0 == vertex_to_vertex_indices[i].size())
			continue;

		const float weight = 1.0f / static_cast<float>(vertex_to_vertex_indices[i].size());

		for (size_t j = 0; j < vertex_to_vertex_indices[i].size(); j++)
		{
			size_t neighbour_j = vertex_to_vertex_indices[i][j];
			displacements[i] += (vertices[neighbour_j] - vertices[i])*weight;
		}
	}

	// Apply per-vertex displacement.
	for (size_t i = 0; i < vertices.size(); i++)
		vertices[i] += displacements[i] * scale;
}
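The displacement pass above is a uniform-weight (umbrella) Laplacian: each vertex is pulled toward the average of its neighbours, scaled by scale. Below is a minimal usage sketch; the mesh object and the way its vertices / vertex_to_vertex_indices members get populated are assumptions for illustration, only laplace_smooth itself comes from the example.

// Sketch: several gentle passes are usually preferable to one aggressive pass,
// since each pass moves every vertex toward the average of its neighbours.
smooth mesh;                      // assumed to already hold a loaded mesh
// ... populate mesh.vertices and mesh.vertex_to_vertex_indices ...
for (int pass = 0; pass < 10; ++pass)
    mesh.laplace_smooth(0.5f);    // small positive scale = mild smoothing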
Example #2
std::vector<int> GetGathervDisplacements(const size_t *counts,
                                         const size_t countsSize)
{
    std::vector<int> displacements(countsSize);
    displacements[0] = 0;

    for (size_t i = 1; i < countsSize; ++i)
    {
        displacements[i] =
            displacements[i - 1] + static_cast<int>(counts[i - 1]);
    }
    return displacements;
}
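This helper builds the exclusive prefix sum that MPI_Gatherv expects as its displs argument. The sketch below shows the usual pairing; everything except GetGathervDisplacements (the GatherDoubles name, buffers, and communicator handling) is an assumption for illustration, not code from the original project.

#include <mpi.h>
#include <vector>

// Sketch: gather a variable-length block of doubles from every rank onto rank 0.
void GatherDoubles(MPI_Comm comm, const std::vector<double> &localData)
{
    int rank = 0, nranks = 0;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nranks);

    // Each rank reports how many elements it contributes.
    int myCount = static_cast<int>(localData.size());
    std::vector<int> recvCounts(nranks, 0);
    MPI_Gather(&myCount, 1, MPI_INT, recvCounts.data(), 1, MPI_INT, 0, comm);

    // Offsets into the receive buffer, computed with the helper above.
    std::vector<size_t> counts(recvCounts.begin(), recvCounts.end());
    std::vector<int> displs = GetGathervDisplacements(counts.data(), counts.size());

    std::vector<double> recvBuf;
    if (rank == 0)
        recvBuf.resize(static_cast<size_t>(displs.back() + recvCounts.back()));

    // Counts and displacements are only significant on the root rank.
    MPI_Gatherv(localData.data(), myCount, MPI_DOUBLE,
                recvBuf.data(), recvCounts.data(), displs.data(), MPI_DOUBLE,
                0, comm);
}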
Example #3
double ChainStatisticsOptimizerState::get_diffusion_coefficient() const {
  if (positions_.empty()) return 0;
  algebra::Vector3Ds positions(positions_.size());
  for (unsigned int i=0; i< positions_.size(); ++i) {
    positions[i]= std::accumulate(positions_[i].begin(), positions_[i].end(),
                           algebra::get_zero_vector_d<3>())
        /positions_[i].size();
  }
  algebra::Vector3Ds
    displacements(positions.size()-1);
  for (unsigned int i=1; i< positions_.size(); ++i) {
    displacements[i-1]= positions[i] -positions[i-1];
  }
  return atom::get_diffusion_coefficient(displacements,
                                         get_period()*get_dt());
}
Example #4
void ParallelBFS::calculate(NodeId root) {
  distance.assign((size_t) vertex_count, infinity);
  NodeId level = 1;
  NodeList frontier;
  frontier.reserve((size_t)vertex_count);
  if (comm.rank() == find_owner(root)) {
    frontier.push_back(root - first_vertex);
    distance[root - first_vertex] = 0;
  }
  std::vector<NodeList> send_buf((size_t)comm.size());
  NodeList new_frontier;

  NodeList sizes((size_t)comm.size()),
           displacements((size_t)comm.size());

  while (mpi::all_reduce(comm, (NodeId)frontier.size(),
                         std::plus<NodeId>()) > 0) {
    for (NodeId u : frontier)
      for (int e = vertices[u]; e < vertices[u + 1]; ++e) {
        int v = edges[e];
        send_buf[find_owner(v)].push_back(v);
      }
    for (int i = 0; i < comm.size(); ++i) {
      mpi::gather(comm, (NodeId)send_buf[i].size(), sizes.data(), i);
      if (i == comm.rank()) {
        for (int j = 1; j < comm.size(); ++j)
          displacements[j] = displacements[j - 1] + sizes[j - 1];
        new_frontier.resize(
            (size_t)(displacements[comm.size()-1] + sizes[comm.size() - 1]));
        mpi::gatherv(comm, send_buf[i], new_frontier, sizes, displacements, i);
      } else {
        mpi::gatherv(comm, send_buf[i], i);
      }
    }
    for (size_t i = 0; i < comm.size(); ++i)
      send_buf[i].clear();
    frontier.clear();
    for (int v : new_frontier) {
      v -= first_vertex;
      if (distance[v] == infinity) {
        distance[v] = level;
        frontier.push_back(v);
      }
    }
    ++level;
  }
}
Example #5
Floats ChainStatisticsOptimizerState::get_diffusion_coefficients() const {
  if (positions_.empty()) return Floats();
  base::Vector<algebra::Vector3Ds >
    displacements(positions_[0].size(),
                  algebra::Vector3Ds( positions_.size()-1));
  for (unsigned int i=1; i< positions_.size(); ++i) {
    algebra::Transformation3D rel
        = algebra::get_transformation_aligning_first_to_second(positions_[i-1],
                                                               positions_[i]);
    for (unsigned int j=0; j < positions_[i].size(); ++j) {
      displacements[j][i-1]= rel.get_transformed(positions_[i-1][j])
        - positions_[i][j];
    }
  }
  Floats ret;
  for (unsigned int i=0; i < displacements.size(); ++i) {
    ret.push_back(atom::get_diffusion_coefficient(displacements[i],
                                                  get_period()*get_dt()));
  }
  return ret;
}
Example #6
void NormalDriver::set_predicted_vertex_positions( const SurfTrack& surf, std::vector<Vec3d>& predicted_positions, double current_t, double& adaptive_dt )
{
    std::vector<Vec3d> displacements( surf.get_num_vertices(), Vec3d(0,0,0) );
    std::vector<Vec3d> velocities( surf.get_num_vertices(), Vec3d(0,0,0) );
    
    for ( unsigned int i = 0; i < surf.get_num_vertices(); ++i )
    {
        if ( surf.m_mesh.m_vertex_to_triangle_map[i].empty() ) 
        { 
            displacements[i] = Vec3d(0,0,0);
            continue;
        }
        
        Vec3d normal(0,0,0);
        double sum_areas = 0.0;
        for ( unsigned int j = 0; j < surf.m_mesh.m_vertex_to_triangle_map[i].size(); ++j )
        {
            double area = surf.get_triangle_area( surf.m_mesh.m_vertex_to_triangle_map[i][j] );
            normal += surf.get_triangle_normal( surf.m_mesh.m_vertex_to_triangle_map[i][j] ) * area;
            sum_areas += area;
        }
        //normal /= sum_areas;
        normal /= mag(normal);
        
        double switch_speed = (current_t >= 1.0) ? -speed : speed;
        velocities[i] = switch_speed * normal;
        displacements[i] = adaptive_dt * velocities[i];
    }
    
    double capped_dt = MeshSmoother::compute_max_timestep_quadratic_solve( surf.m_mesh.get_triangles(), surf.get_positions(), displacements, false );
    
    adaptive_dt = min( adaptive_dt, capped_dt );
    
    for ( unsigned int i = 0; i < surf.get_num_vertices(); ++i )
    {
        predicted_positions[i] = surf.get_position(i) + adaptive_dt * velocities[i];
    }
    
}
Example #7
File: flatvol.cpp  Project: 21hub/QuantLib
    shared_ptr<MarketModel>
    FlatVolFactory::create(const EvolutionDescription& evolution,
                                  Size numberOfFactors) const {
        const vector<Time>& rateTimes = evolution.rateTimes();
        Size numberOfRates = rateTimes.size()-1;

        vector<Rate> initialRates(numberOfRates);
        for (Size i=0; i<numberOfRates; ++i)
            initialRates[i] = yieldCurve_->forwardRate(rateTimes[i],
                                                       rateTimes[i+1],
                                                       Simple);

        vector<Volatility> displacedVolatilities(numberOfRates);
        for (Size i=0; i<numberOfRates; ++i) {
            Volatility vol = // to be changed
                volatility_(rateTimes[i]);
            displacedVolatilities[i] =
                initialRates[i]*vol/(initialRates[i]+displacement_);
        }

        vector<Spread> displacements(numberOfRates, displacement_);

        Matrix correlations = exponentialCorrelations(evolution.rateTimes(),
                                                      longTermCorrelation_,
                                                      beta_);
        shared_ptr<PiecewiseConstantCorrelation> corr(new
            TimeHomogeneousForwardCorrelation(correlations,
                                              rateTimes));
        return shared_ptr<MarketModel>(new
            FlatVol(displacedVolatilities,
                           corr,
                           evolution,
                           numberOfFactors,
                           initialRates,
                           displacements));
    }
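Note that the "displacement" here is the displaced-diffusion shift, not a geometric offset. The conversion of vol into displacedVolatilities[i] matches the standard relation between a lognormal volatility quoted on the forward F and the volatility of the shifted process (this is an explanatory note, not something stated in the snippet itself):

\sigma_{\text{displaced}} \approx \frac{F\,\sigma_{\text{lognormal}}}{F + \alpha},
\qquad F = \texttt{initialRates[i]},\quad \alpha = \texttt{displacement\_}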
Example #8
void Sort<Hilbert::HilbertIndices,unsigned int>::communicate_bins()
{
  // Create storage for the global bin sizes.  This
  // is the number of keys which will be held in
  // each bin over all processors.
  std::vector<unsigned int> global_bin_sizes(_n_procs);

  libmesh_assert_equal_to (_local_bin_sizes.size(), global_bin_sizes.size());

  // Sum to find the total number of entries in each bin.
  // This is stored in global_bin_sizes.  Note, we
  // explicitly know that we are communicating MPI_UNSIGNED's here.
  MPI_Allreduce(&_local_bin_sizes[0],
                &global_bin_sizes[0],
                _n_procs,
                MPI_UNSIGNED,
                MPI_SUM,
                this->comm().get());

  // Create a vector to temporarily hold the results of MPI_Gatherv
  // calls.  The vector dest  may be saved away to _my_bin depending on which
  // processor is being MPI_Gatherv'd.
  std::vector<Hilbert::HilbertIndices> dest;

  unsigned int local_offset = 0;

  for (unsigned int i=0; i<_n_procs; ++i)
    {
      // Vector to receive the total bin size for each
      // processor.  Processor i's bin size will be
      // held in proc_bin_size[i]
      std::vector<unsigned int> proc_bin_size(_n_procs);

      // Find the number of contributions coming from each
      // processor for this bin.  Note: Allgather combines
      // the MPI_Gather and MPI_Bcast operations into one.
      // Note: Here again we know that we are communicating
      // MPI_UNSIGNED's so there is no need to check the MPI_traits.
      MPI_Allgather(&_local_bin_sizes[i], // Source: # of entries on this proc in bin i
                    1,                    // Number of items to gather
                    MPI_UNSIGNED,
                    &proc_bin_size[0],    // Destination: Total # of entries in bin i
                    1,
                    MPI_UNSIGNED,
                    this->comm().get());

      // Compute the offsets into my_bin for each processor's
      // portion of the bin.  These are basically partial sums
      // of the proc_bin_size vector.
      std::vector<unsigned int> displacements(_n_procs);
      for (unsigned int j=1; j<_n_procs; ++j)
        displacements[j] = proc_bin_size[j-1] + displacements[j-1];

      // Resize the destination buffer
      dest.resize (global_bin_sizes[i]);

      MPI_Gatherv((_data.size() > local_offset) ?
                  &_data[local_offset] :
                  NULL,                   // Points to the beginning of the bin to be sent
                  _local_bin_sizes[i],      // How much data is in the bin being sent.
                  Parallel::StandardType<Hilbert::HilbertIndices>(), // The data type we are sorting
                  (dest.empty()) ?
                  NULL :
                  &dest[0],               // Enough storage to hold all bin contributions
                  (int*) &proc_bin_size[0], // How much is to be received from each processor
                  (int*) &displacements[0], // Offsets into the receive buffer
                  Parallel::StandardType<Hilbert::HilbertIndices>(), // The data type we are sorting
                  i,                        // The root process (we do this once for each proc)
                  this->comm().get());

      // Copy the destination buffer if it
      // corresponds to the bin for this processor
      if (i == _proc_id)
        _my_bin = dest;

      // Increment the local offset counter
      local_offset += _local_bin_sizes[i];
    }
}
Example #9
void Sort<KeyType,IdxType>::communicate_bins()
{
#ifdef LIBMESH_HAVE_MPI
  // Create storage for the global bin sizes.  This
  // is the number of keys which will be held in
  // each bin over all processors.
  std::vector<IdxType> global_bin_sizes = _local_bin_sizes;

  // Sum to find the total number of entries in each bin.
  this->comm().sum(global_bin_sizes);

  // Create a vector to temporarily hold the results of MPI_Gatherv
  // calls.  The vector dest  may be saved away to _my_bin depending on which
  // processor is being MPI_Gatherv'd.
  std::vector<KeyType> dest;

  IdxType local_offset = 0;

  for (processor_id_type i=0; i<_n_procs; ++i)
    {
      // Vector to receive the total bin size for each
      // processor.  Processor i's bin size will be
      // held in proc_bin_size[i]
      std::vector<IdxType> proc_bin_size;

      // Find the number of contributions coming from each
      // processor for this bin.  Note: allgather combines
      // the MPI_Gather and MPI_Bcast operations into one.
      this->comm().allgather(_local_bin_sizes[i], proc_bin_size);

      // Compute the offsets into my_bin for each processor's
      // portion of the bin.  These are basically partial sums
      // of the proc_bin_size vector.
      std::vector<IdxType> displacements(_n_procs);
      for (processor_id_type j=1; j<_n_procs; ++j)
        displacements[j] = proc_bin_size[j-1] + displacements[j-1];

      // Resize the destination buffer
      dest.resize (global_bin_sizes[i]);

      MPI_Gatherv((_data.size() > local_offset) ?
                  &_data[local_offset] :
                  NULL,                            // Points to the beginning of the bin to be sent
                  _local_bin_sizes[i],               // How much data is in the bin being sent.
                  Parallel::StandardType<KeyType>(), // The data type we are sorting
                  (dest.empty()) ?
                  NULL :
                  &dest[0],                        // Enough storage to hold all bin contributions
                  (int*) &proc_bin_size[0],          // How much is to be received from each processor
                  (int*) &displacements[0],          // Offsets into the receive buffer
                  Parallel::StandardType<KeyType>(), // The data type we are sorting
                  i,                                 // The root process (we do this once for each proc)
                  this->comm().get());

      // Copy the destination buffer if it
      // corresponds to the bin for this processor
      if (i == _proc_id)
        _my_bin = dest;

      // Increment the local offset counter
      local_offset += _local_bin_sizes[i];
    }
#endif // LIBMESH_HAVE_MPI
}
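In both libMesh variants the displacements loop is an exclusive prefix sum of proc_bin_size, i.e. displacements[j] is the number of entries contributed by processors 0..j-1. Since C++17 the same offsets can be produced with std::exclusive_scan; the snippet below is an equivalent sketch for comparison, not the library's actual code.

#include <numeric>
#include <vector>

// Equivalent sketch: displacements[j] = proc_bin_size[0] + ... + proc_bin_size[j-1].
std::vector<unsigned int>
make_displacements(const std::vector<unsigned int> &proc_bin_size)
{
  std::vector<unsigned int> displacements(proc_bin_size.size());
  std::exclusive_scan(proc_bin_size.begin(), proc_bin_size.end(),
                      displacements.begin(), 0u);
  return displacements;
}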
Example #10
File: psrs.c  Project: florianpilz/PSRS
int main( int argc, char *argv[] )
{
  // MPI variables and initialization
  int rank, size;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  // Time measurement points
  double time_start, time_gen, time_local_sort, time_since_last;
  double time_comm_1, time_comm_2, time_comm_3, time_comm_4, time_comm_5, time_comm_6, time_comm_7, time_comm_8;
  double time_org_1, time_org_2, time_org_3, time_org_4, time_org_5, time_org_6, time_org_7, time_org_8;

  // (minimalistic) sanity check of the command-line arguments
  if ( argc < 2 )
  {
    if ( rank == 0 )
      printf( "Synopsis: psrs <size of random array> [<output mode=(silent|table|full)>]\n" );
    MPI_Finalize();
    return 1;
  }

  // Determine the size of the number array to be generated
  int numbers_size = atoi( argv[1] );

  // Determine the level of detail of the output on stdout
  int output_level = 0;
  if ( argc == 3 )
  {
    if ( strcmp( argv[2], "full" ) == 0 )
      output_level = 0;
    if ( strcmp( argv[2], "table" ) == 0 )
      output_level = 1;
    if ( strcmp( argv[2], "silent" ) == 0 )
      output_level = 2;
  }

  // Initialize the time measurement
  time_start = MPI_Wtime();
  time_since_last = time_start;

  // Generate the random numbers
  int numbers[ numbers_size ];
  if (rank == 0) {
    // actual generation
    generate_random_numbers(numbers, numbers_size);
    // print the invocation parameters
    if ( output_level == 0 )
      printf("starting to sort %d values on %d nodes\n\n", numbers_size, size);
    if ( output_level == 1 )
      printf("%d %d ", numbers_size, size);
  }

  // Measure the time for random number generation
  time_gen = MPI_Wtime() - time_since_last;
  time_since_last += time_gen;

  // Distribute the random numbers evenly
  int temp_numbers_per_processor_size = numbers_size / size;
  // Number of values left over after rounding down ( numbers_size / size )
  int spare_numbers = numbers_size - temp_numbers_per_processor_size * size;
  int numbers_per_processor_sizes[ size ];
  for ( int pos = 0; pos < size; pos++ )
  {
    numbers_per_processor_sizes[ pos ] = temp_numbers_per_processor_size;
    // distribute the leftover values among the first nodes
    if (spare_numbers > 0)
    {
      numbers_per_processor_sizes[ pos ]++;
      spare_numbers--;
    }
  }
  
  // Organization time 1
  time_org_1 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_1;

  int numbers_per_processor_size;
  MPI_Scatter(numbers_per_processor_sizes, 1, MPI_INT, &numbers_per_processor_size, 1, MPI_INT, 0, MPI_COMM_WORLD); // TODO avoid by accessing numbers_per_processor_sizes[rank] since each processor knows arguments and cluster size
  
  // Communication time 1
  time_comm_1 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_1;

  int scatter_displacements[size];
  displacements(scatter_displacements, numbers_per_processor_sizes, size); // TODO root process only
  
  // Organization time 2
  time_org_2 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_2;

  int numbers_per_processor[ numbers_per_processor_size ];
  MPI_Scatterv(numbers, numbers_per_processor_sizes, scatter_displacements, MPI_INT, numbers_per_processor,
    numbers_per_processor_size, MPI_INT, 0, MPI_COMM_WORLD);

  // Communication time 2
  time_comm_2 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_2;

  // sort locally
  quicksort( numbers_per_processor, 0, numbers_per_processor_size-1 );

  // Time for local sorting
  time_local_sort = MPI_Wtime() - time_since_last;
  time_since_last += time_local_sort;

  // create a representative selection
  int w = numbers_size / ( size * size );
  int representative_selection[ size ];
  for( int pos=0; pos < size; pos++ )
    representative_selection[ pos ] = numbers_per_processor[ pos * w ]; // FIXME should be pos * w + 1

  // Organization time 3
  time_org_3 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_3;

  // gather the selection on one node
  int selected_numbers[ size * size ];
  MPI_Gather( representative_selection, size, MPI_INT, selected_numbers, size, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 3
  time_comm_3 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_3;

  // sort the selection
  int pivots[ size - 1 ];
  if ( rank == 0 )
  {
    quicksort( selected_numbers, 0, size * size - 1 );
    // select the pivots
    int t = size / 2;
    for ( int pos = 1; pos < size; pos++ )
      pivots[ pos - 1 ] = selected_numbers[ pos * size + t ];
  }

  // Organization time 4
  time_org_4 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_4;

  // distribute the pivots
  MPI_Bcast( pivots, size - 1, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 4
  time_comm_4 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_4;

  // form the blocks
  int block_sizes[ size ];
  divide_into_blocks( block_sizes, size, numbers_per_processor, numbers_per_processor_size, pivots );

  // Organization time 5
  time_org_5 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_5;

  // send the blocks to the nodes according to rank
  int receive_block_sizes[ size ];
  MPI_Alltoall( block_sizes, 1, MPI_INT, receive_block_sizes, 1, MPI_INT, MPI_COMM_WORLD );
  
  // Communication time 5
  time_comm_5 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_5;

  int receive_block_displacements[ size ];
  displacements( receive_block_displacements, receive_block_sizes, size );
  int block_displacements[ size ];
  displacements( block_displacements, block_sizes, size );

  // Organization time 6
  time_org_6 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_6;

  int blocksize = sum( receive_block_sizes, size );
  int blocks_per_processor[ blocksize ];
  MPI_Alltoallv( numbers_per_processor, block_sizes, block_displacements, MPI_INT, blocks_per_processor,
    receive_block_sizes, receive_block_displacements, MPI_INT, MPI_COMM_WORLD );

  // Communication time 6
  time_comm_6 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_6;

  // each node sorts its blocks
  quicksort( blocks_per_processor, 0, blocksize -1 );

  // Organization time 7
  time_org_7 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_7;

  // gather the sorted blocks
  // TODO could be avoided by using MPI_Allgather instead of MPI_Alltoall
  int blocksizes[ size ];
  MPI_Gather( &blocksize, 1, MPI_INT, blocksizes, 1, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 7
  time_comm_7 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_7;

  int receive_sorted_displacements[ size ];
  displacements( receive_sorted_displacements, blocksizes, size );

  // Organization time 8
  time_org_8 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_8;

  int sorted[ numbers_size ];
  MPI_Gatherv( blocks_per_processor, blocksize, MPI_INT, sorted, blocksizes, receive_sorted_displacements,
    MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 8
  time_comm_8 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_8;

  // add up the local timing values
  double time_comm = time_comm_1 + time_comm_2 + time_comm_3 + time_comm_4
    + time_comm_5 + time_comm_6 + time_comm_7 + time_comm_8;
  double time_org = time_org_1 + time_org_2 + time_org_3 + time_org_4
    + time_org_5 + time_org_6 + time_org_7 + time_org_8;
  double time_local_sort_total = 0.0;
  MPI_Reduce( &time_local_sort, &time_local_sort_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
  double time_comm_total = 0.0;
  MPI_Reduce( &time_comm, &time_comm_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
  double time_org_total = 0.0;
  MPI_Reduce( &time_org, &time_org_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );

  // Done!
  if ( rank == 0 )
  {
    // average values
    double time_local_sort_mean = time_local_sort / size;
    double time_comm_mean = time_comm / size;
    double time_org_mean = time_org / size;
    // convert to milliseconds
    time_gen *= 1000;
    time_local_sort_mean *= 1000;
    time_comm_mean *= 1000;
    time_org_mean *= 1000;
    // output
    if ( output_level == 0 )
    {
      print_array( sorted, numbers_size );
      printf( "\ntime measurement:\n" );
      printf( "number generation   = %.15f msec\n", time_gen );
      printf( "local sort (avg)    = %.15f msec\n", time_local_sort_mean );
      printf( "communication (avg) = %.15f msec\n", time_comm_mean );
      printf( "organization (avg)  = %.15f msec\n", time_org_mean );
      printf( "-------------------\n" );
      printf( "total time (no gen) = %.15f msec\n", time_local_sort_mean + time_comm_mean + time_org_mean );
      printf( "total time          = %.15f msec\n", time_local_sort_mean + time_comm_mean + time_org_mean + time_gen );
    }
    if ( output_level == 1 )
    {
      // same order as for output level 0
      printf( "%.15f %.15f %.15f %.15f %.15f %.15f ",
	      time_gen,
	      time_local_sort_mean,
	      time_comm_mean,
	      time_org_mean,
	      time_local_sort_mean + time_comm_mean + time_org_mean,
	      time_local_sort_mean + time_comm_mean + time_org_mean + time_gen
      );
      if ( is_sorted( sorted, numbers_size ) == 1 )
	printf( "valid\n" );
      else
	printf( "invalid\n" );
    }
  }
  MPI_Finalize();
  return 0;
}
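The displacements() helper called throughout this example is not part of the snippet. From its call sites (it turns per-rank counts into the offset arrays for MPI_Scatterv, MPI_Alltoallv and MPI_Gatherv), it is presumably the same exclusive prefix sum as in the earlier examples; the reconstruction below is an assumption for readability, not the project's actual implementation.

/* Hypothetical reconstruction: offsets[i] = counts[0] + ... + counts[i-1]. */
void displacements( int offsets[], const int counts[], int size )
{
  offsets[0] = 0;
  for ( int i = 1; i < size; i++ )
    offsets[i] = offsets[i - 1] + counts[i - 1];
}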
Example #11
std::vector<GBL::Displacement_t> findTheBallPipeline(const char* const videoFile, const ImageProc::ImageProc* imProc, Draw::DrawInterface& drawer,
		const Detector::DetectorInterface& detectorInterface, const Descriptor::DescriptorInterface& descriptorInterface, const Match::MatcherInterface& matcherInterface, const Displacement::DisplacementInterface& displacementInterface,
		InputMethod::InputMethodInterface& inputMethodInterface, OutputMethod::OutputMethodInterface& outputMethodInterface) {
	
	// Single threaded initialization phase
	if (inputMethodInterface.start(videoFile) != GBL::RESULT_SUCCESS) {
		LOG_ERROR("Could not open %s", videoFile);
		return std::vector < GBL::Displacement_t > (0);
	}

	// Open our own output interface in case we want to draw
	OutputMethod::OutputImageSequence* outImSeq = nullptr;
	if(GBL::drawResults_b) {
		outImSeq = new OutputMethod::OutputImageSequence();
		outImSeq->open("correspondence_frame");
	}

	// Get background image
	GBL::Frame_t background;
	LOG_INFO("Retrieving background");
	// For now take first frame as the background
	if (inputMethodInterface.getNextFrame(background) != GBL::RESULT_SUCCESS) {
		LOG_ERROR("Could not get background");
		return std::vector < GBL::Displacement_t > (0);
	}

	const uint32_t nbFrames = 50;
	GBL::DescriptorContainer_t descriptors[nbFrames];
	std::vector<GBL::Displacement_t> displacements(nbFrames);

	LOG_INFO("Processing frames");
	GBL::Frame_t* frame = new GBL::Frame_t;
	uint32_t i = 0;
	uint32_t sequenceNo = 0;
#pragma omp parallel shared(inputMethodInterface, detectorInterface, descriptorInterface, matcherInterface, displacementInterface, outputMethodInterface, background, displacements, descriptors, imProc, frame, i, sequenceNo, outImSeq)
{	

	#pragma omp single nowait
	while(inputMethodInterface.isMoreInput()) {
		if(inputMethodInterface.getNextFrame(*frame) != GBL::RESULT_SUCCESS) {
			// Let's assume max 30 fps and put the thread to sleep for a while
			usleep(20000);
			continue;
		}
		#pragma omp task firstprivate(i, frame, sequenceNo) shared(descriptors, detectorInterface, descriptorInterface, matcherInterface, displacements, displacementInterface, outputMethodInterface, imProc, background, outImSeq) 
		{
			// Description
			LOG_ENTER("Describing image %d", i); 
			descriptors[i].sequenceNo = sequenceNo;
			descriptionHelper(*frame, descriptors[i], background, detectorInterface, descriptorInterface, *imProc);

			// Check whether neighbours still exist
			uint32_t prevNeighbourIndex = (i+nbFrames-1) % nbFrames;
			if(descriptors[prevNeighbourIndex].sequenceNo == sequenceNo-1) {
				// Check neighbours whether they are ready
				if(descriptors[prevNeighbourIndex].ready == true) {
					LOG_INFO("Matching %d and %d", prevNeighbourIndex, i);
					GBL::MatchesContainer_t matches;
					matcherHelper(descriptors[prevNeighbourIndex], descriptors[i], matches, matcherInterface);
					LOG_INFO("Calculating displacement of %d and %d", prevNeighbourIndex, i);
					displacements[prevNeighbourIndex].sequenceNo = sequenceNo - 1;
					displacementHelper(descriptors[prevNeighbourIndex], descriptors[i], matches, displacements[prevNeighbourIndex], displacementInterface, outputMethodInterface);

					if (GBL::drawResults_b || GBL::showStuff_b) {
						GBL::Frame_t prevNeighbourFrame;
						LOG_INFO("Generating corresponding frame %d and %d", prevNeighbourIndex, i);
						// For the index of getFrame we need to add the background frame again
						if(inputMethodInterface.getFrame(sequenceNo, prevNeighbourFrame) == GBL::RESULT_SUCCESS) {
							Utils::Utils::drawResult(prevNeighbourFrame, *frame, drawer, descriptors[prevNeighbourIndex], descriptors[i], matches, outImSeq);	
						} else {
							LOG_WARNING("Could not get frame of neighbour");
						}
					}
				}
			} else {
				LOG_WARNING("Previous neighbour was someone else");
			}
			uint32_t nextNeighbourIndex = (i+1) % nbFrames;
			if(descriptors[nextNeighbourIndex].sequenceNo == sequenceNo+1) {
				if(descriptors[nextNeighbourIndex].ready == true) {
					LOG_INFO("Matching %d and %d", i, nextNeighbourIndex);
					GBL::MatchesContainer_t matches;
					matcherHelper(descriptors[i], descriptors[nextNeighbourIndex], matches, matcherInterface);
					LOG_INFO("Calculating displacement of %d and %d", i, nextNeighbourIndex);
					displacements[i].sequenceNo = sequenceNo;
					displacementHelper(descriptors[i], descriptors[nextNeighbourIndex], matches, displacements[i], displacementInterface, outputMethodInterface);
				}
			} else {
				LOG_WARNING("Next neighbour was someone else");
			}
			delete frame;
		}
		sequenceNo++;
		i = (i+1) % nbFrames;
		// Reset the i-th buffers
		descriptors[i].valid = false;
		descriptors[i].ready = false;
		descriptors[i].keypoints.clear();
		frame = new GBL::Frame_t;
	}
}
	if(GBL::drawResults_b) {
		outImSeq->close();
	}
	inputMethodInterface.stop();
	delete frame;
	return displacements;
}