Code Example #1
File: merge_sort.cpp Project: monkeylyf/interviewjam
  static void merge_sort_vector(vector<T> *vect) {
    if (vect->size() <= 1) {
      return;
    }
    typename std::vector<T>::size_type mid = vect->size() / 2;
    typename std::vector<T> left(vect->begin(), vect->begin() + mid);
    typename std::vector<T> right(vect->begin() + mid, vect->end());

    merge_sort_vector(&left);
    merge_sort_vector(&right);

    merge(
      left.begin(), left.end(), right.begin(), right.end(), vect->begin());
  }
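The function above is declared static and uses a bare T, so it presumably lives inside a class template over T. The following is a minimal, self-contained sketch (not from the project) that restates it as a free function template and adds a small driver; the split halves are copies, so std::merge can safely write back into the original vector.

#include <algorithm>
#include <iostream>
#include <vector>

// Free-function restatement of the top-down merge sort shown above.
template <typename T>
void merge_sort_vector(std::vector<T>* vect) {
  if (vect->size() <= 1) return;
  typename std::vector<T>::size_type mid = vect->size() / 2;
  std::vector<T> left(vect->begin(), vect->begin() + mid);
  std::vector<T> right(vect->begin() + mid, vect->end());
  merge_sort_vector(&left);
  merge_sort_vector(&right);
  // left and right are copies, so merging back into *vect is safe.
  std::merge(left.begin(), left.end(), right.begin(), right.end(), vect->begin());
}

int main() {
  std::vector<int> v = {5, 2, 9, 1, 7};
  merge_sort_vector(&v);
  for (int x : v) std::cout << x << ' ';  // prints: 1 2 5 7 9
  std::cout << '\n';
}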
Code Example #2
File: csv.hpp Project: PizzaFactory/hrp2ev3
      void write( OutputIterator result ) const
    {
      const CharT dquo = widen< CharT >( '\"' );
      const CharT comma = widen< CharT >( ',' );
      const CharT lf = widen< CharT >( '\n' );

      for ( typename std::vector< std::vector< string_type > >::const_iterator r_iter( records_.begin() ), r_last( records_.end() );
            r_iter != r_last;
            ++r_iter )
      {
        for ( typename std::vector< string_type >::const_iterator f_iter( r_iter->begin() ), f_last( r_iter->end() );
              f_iter != f_last;
              ++f_iter )
        {
          string_type field;  // holds the escaped field; the original declared std::string, which only works when CharT is char
          bool need_escape = false;

          for ( typename string_type::const_iterator s_iter( f_iter->begin() ), s_last( f_iter->end() );
                s_iter != s_last;
                ++s_iter )
          {
            CharT ch = *s_iter;
            if ( ch == dquo )
            {
              field.push_back( dquo );
              need_escape = true;
            }
            else if ( ch == comma || ch == lf )
            {
              need_escape = true;
            }
            field.push_back( ch );
          }

          if ( need_escape )
          {
            field = dquo + field + dquo;
          }
          result = std::copy( field.begin(), field.end(), result );
          if ( boost::next( f_iter ) != f_last )
          {
            *result++ = comma;
          }
        }
        *result++ = lf;
      }
    }
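write() above applies the usual CSV quoting rule: embedded double quotes are doubled, and a field is wrapped in quotes when it contains a quote, comma, or line feed. Below is a self-contained sketch of that rule for plain char strings; escape_csv_field is a hypothetical name, and the record/field iteration and widen<CharT> machinery of the original are omitted.

#include <iostream>
#include <string>

// Same quoting rule as write() above, specialized to char:
// double any embedded '"', and wrap the field in quotes if it
// contains a quote, comma, or newline.
std::string escape_csv_field(const std::string& in) {
  std::string field;
  bool need_escape = false;
  for (char ch : in) {
    if (ch == '"') { field.push_back('"'); need_escape = true; }
    else if (ch == ',' || ch == '\n') { need_escape = true; }
    field.push_back(ch);
  }
  if (need_escape) field = '"' + field + '"';
  return field;
}

int main() {
  std::cout << escape_csv_field("plain") << '\n';       // plain
  std::cout << escape_csv_field("a,b") << '\n';         // "a,b"
  std::cout << escape_csv_field("say \"hi\"") << '\n';  // "say ""hi"""
}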
Code Example #3
 property_t(const std::vector<std::vector<T> >& mtx) {
     vector matrix;
     for(typename std::vector<std::vector<T> >::const_iterator i = mtx.begin(); i != mtx.end(); ++i) {
         vector row;
         for(typename std::vector<T>::const_iterator j = i->begin(); j != i->end(); ++j)
             row.push_back(*j);
         matrix.push_back(row);
     }
     property_base::operator= <vector>(matrix);
 }
Code Example #4
void shiftGrid( std::vector<std::vector<Point2> >& grid, const Point2& shift )
{
	for( typename std::vector<std::vector<Point2> >::iterator itv = grid.begin(), itvEnd = grid.end();
	     itv != itvEnd;
	     ++itv )
	{
		for( typename std::vector<Point2>::iterator it = itv->begin(), itEnd = itv->end();
		     it != itEnd;
		     ++it  )
		{
			*it += shift;
		}
	}
}
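shiftGrid only requires that Point2 support operator+=. Below is a hypothetical minimal Point2 and a driver; the traversal is restated with range-based for loops but performs the same element-wise shift.

#include <iostream>
#include <vector>

// Hypothetical minimal Point2; the real project type only needs operator+= here.
struct Point2 {
  double x, y;
  Point2& operator+=(const Point2& p) { x += p.x; y += p.y; return *this; }
};

// Same traversal as shiftGrid above, restated with range-based for loops.
void shiftGrid(std::vector<std::vector<Point2> >& grid, const Point2& shift) {
  for (std::vector<Point2>& row : grid)
    for (Point2& p : row)
      p += shift;
}

int main() {
  std::vector<std::vector<Point2> > grid(2, std::vector<Point2>(3, Point2{1.0, 2.0}));
  shiftGrid(grid, Point2{10.0, 0.5});
  std::cout << grid[1][2].x << "," << grid[1][2].y << '\n';  // prints: 11,2.5
}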
Code Example #5
inline typename FloatTraits::value_type
minimum_cycle_mean(const Graph &g, VertexIndexMap vim,
                   EdgeWeightMap ewm, EdgeIndexMap eim,
                   std::vector<typename graph_traits<Graph>::edge_descriptor>* pcc = 0,
                   FloatTraits ft = FloatTraits())
{
    typedef typename remove_const<
        typename property_traits<EdgeWeightMap>::value_type
    >::type Weight;
    typename std::vector<Weight> ed_w2(boost::num_edges(g), 1);
    return minimum_cycle_ratio(g, vim, ewm,
                               make_iterator_property_map(ed_w2.begin(), eim),
                               pcc, ft);
}
Code Example #6
void fillImageWithDecompostion(const std::vector<Curve> & contours,
                               png::image<png::rgb_pixel> & output)
{
  // Color map for the DLL
  const png::rgb_pixel pixelColors[4] = {
    // redPixel
    png::rgb_pixel(255,0,0),
    // greenPixel
    png::rgb_pixel(0,255,0),
    // bluePixel
    png::rgb_pixel(0,0,255),
    // yellowPixel
    png::rgb_pixel(255,255,0)
  };

  typedef typename DLL::Segment<DLL_Type>   DLLSegment;
  Utils::GreedyDecomposition<DLLSegment> decompositor;

  unsigned int nbDLL = 0;
  for (typename std::vector<Curve>::const_iterator contourItor = contours.begin();
       contourItor != contours.end(); ++contourItor) {
    // decompose the current contour
    typename std::vector<DLLSegment> dlls = decompositor.decomposeCurve(*contourItor);
    // color each DLL segment into the output image
    // and display the points coordinates of the DLL segment on the console
    unsigned int colorIndex = 3;
    for (typename std::vector<DLLSegment>::const_iterator dllItor = dlls.begin();
         dllItor != dlls.end(); ++dllItor) {
      const Curve & curve = dllItor->getCurve();
      if (verbose)
        std::cout << "DLL " << ++nbDLL << ":\t" << *dllItor
                  << "\n\tPoints: ";
      for(typename Curve::const_iterator coordItor = curve.begin();
          coordItor != curve.end();
          ++coordItor) {
        output.set_pixel(coordItor->first, coordItor->second,
                         pixelColors[colorIndex]);
        if (verbose)
          std::cout << "(" << coordItor->first << ","
                    << coordItor->second << ") ";
      }
      if (verbose)
        std::cout << std::endl;
      colorIndex = (colorIndex + 1) % 3;
    }
  }
}
Code Example #7
File: DataSegmentGeneral.cpp Project: Benzlxs/PRSM
void
  Datasegments<Scalar>::
  buildList ( M3& KMat, Scalar* centers, int nSegments, typename std::vector< PatchCenter<Scalar> >& projPatchCenters)
{
  projPatchCenters.clear();
  projPatchCenters.resize( nSegments );

  // first project current solutions:
  for (int i=0;i<nSegments;i++)
  {
    P3 p0     = KMat * P3( &centers[3*i] );// center is in camera coords
    p0 /= p0[2];
    //      projPatchCenters[i]   = PatchCenter<Scalar>( int(p0[0]-0.5), int (p0[1]-0.5), P3( &centers[3*i] ), i );
    projPatchCenters[i]   = PatchCenter<Scalar>( int(floor(p0[0]-0.5)), int (floor(p0[1]-0.5)), p0, i );
  }
  std::sort( projPatchCenters.begin(), projPatchCenters.end() );
  // need also a map where a certain segment is now, to start searching
}
Code Example #8
File: util.cpp Project: randprint/pentagram-xenon
template<class T> void ArgvToString(const std::vector<T> &argv, T &args)
{
    // Clear the string
    args.clear();

    typename std::vector<T>::const_iterator i;
    typename T::const_iterator j;
    int ch;

    for(i = argv.begin(); i != argv.end(); ++i)
    {
        for(j = i->begin(); j != i->end(); ++j)
        {
            ch = *j;

            // No quoting, only escaping

            // Handle \, ", ', \n, \r, \t., ' '
            if (ch == '\\' || ch == '\"' || ch == '\'' || ch == ' ')
            {
                args += '\\';
            }
            else if (ch == '\n')
            {
                args += '\\';
                ch = 'n';
            }
            else if (ch == '\r')
            {
                args += '\\';
                ch = 'r';
            }
            else if (ch == '\t')
            {
                args += '\\';
                ch = 't';
            }

            args += ch;
        }
        args += ' ';
    }
}
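ArgvToString joins the arguments with spaces, backslash-escaping \, ", ', and spaces, and rewriting newline, carriage return, and tab as \n, \r, \t. A small usage sketch, assuming the template definition above is visible in the same translation unit:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> argv = {"prog", "hello world", "tab\there"};
  std::string args;
  ArgvToString(argv, args);
  // prints: prog hello\ world tab\there  (each argument is followed by a space)
  std::cout << args << '\n';
}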
Code Example #9
template < typename IN_PORT_TYPE > int vector_sink_s_base::_analyzerServiceFunction( typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams ) {

  typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;

  boost::mutex::scoped_lock lock(serviceThreadLock);

  if ( validGRBlock() == false ) {
    
    // create our processing block
    createBlock();

    LOG_DEBUG(  vector_sink_s_base, " FINISHED BUILDING  GNU RADIO BLOCK");
  }
   
  // process any Stream ID changes this could affect number of io streams
  processStreamIdChanges();
    
  if ( !validGRBlock() || istreams.size() == 0 ) {
    LOG_WARN(vector_sink_s_base, "NO STREAMS ATTACHED TO BLOCK..." );
    return NOOP;
  }

  // resize data vectors for passing data to GR_BLOCK object
  _input_ready.resize( istreams.size() );
  _ninput_items_required.resize( istreams.size());
  _ninput_items.resize( istreams.size());
  _input_items.resize(istreams.size());
  _output_items.resize(0);
  
  //
  // RESOLVE: need to look at forecast strategy, 
  //    1)  see how many read items are necessary for N number of outputs
  //    2)  read input data and see how much output we can produce
  //
  
  //
  // Grab available data from input streams
  //
  typename _IStreamList::iterator istream = istreams.begin();
  int nitems=0;
  for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
    // note this a blocking read that can cause deadlocks
    nitems = istream->read();
    
    if ( istream->overrun() ) {
        LOG_WARN( vector_sink_s_base, " NOT KEEPING UP WITH STREAM ID:" << istream->streamID );
    }
    
    // RESOLVE issue when SRI changes that could affect the GNU Radio BLOCK
    if ( istream->sriChanged() ) {
      LOG_DEBUG( vector_sink_s_base, "SRI CHANGED, STREAMD IDX/ID: " 
               << idx << "/" << istream->pkt->streamID );
    }
  }

  LOG_TRACE( vector_sink_s_base, "READ NITEMS: "  << nitems );
  if ( nitems <= 0 && !_istreams[0].eos() ) return NOOP;

  bool exitServiceFunction = false;
  bool eos = false;
  int  nout = 0;
  while ( nout > -1 && !exitServiceFunction && serviceThread->threadRunning() ) {

    eos = false;
    nout = _forecastAndProcess( eos, istreams );
    if ( nout > -1  ) {
      // we chunked on data so move read pointer..
      istream = istreams.begin();
      for ( ; istream != istreams.end(); istream++ ) {

	int idx=std::distance( istreams.begin(), istream );
	// if we processed data for this stream
	if ( _input_ready[idx] ) {
	  size_t nitems = 0;
	  try {
	    nitems = gr_sptr->nitems_read( idx );
	  }
	  catch(...){}
      
	  if ( nitems > istream->nitems() ) {
	       LOG_WARN( vector_sink_s_base,  "WORK CONSUMED MORE DATA THAN AVAILABLE,  READ/AVAILABLE " << nitems << "/" << istream->nitems() );
               nitems = istream->nitems();
	  }
	  istream->consume( nitems );
          LOG_TRACE( vector_sink_s_base, " CONSUME READ DATA  ITEMS/REMAIN " << nitems << "/" << istream->nitems());
	}

      }
      gr_sptr->reset_read_index();
    }

    // check for not enough data return
    if ( nout == -1 ) {

      // check for  end of stream
      istream = istreams.begin();
      for ( ; istream != istreams.end() ; istream++) if ( istream->eos() ) eos=true;

      if ( eos ) {
        LOG_TRACE( vector_sink_s_base, " DATA NOT READY, EOS:" << eos );
	_forecastAndProcess( eos, istreams );
      }

      exitServiceFunction = true;
    }

  }

  if ( eos ) {

    istream = istreams.begin();
    for ( ; istream != istreams.end() ; istream++) {
      int idx=std::distance( istreams.begin(), istream );
      LOG_TRACE( vector_sink_s_base, " CLOSING INPUT STREAM IDX:" << idx );
      istream->close();
    }
  }

  //
  // set the read pointers of the GNU Radio Block to start at the beginning of the 
  // supplied buffers
  //
  gr_sptr->reset_read_index();

  LOG_TRACE( vector_sink_s_base, " END OF ANALYZER SERVICE FUNCTION....." << noutput_items );

  if ( nout == -1 && eos == false )
    return NOOP; 
  else
    return NORMAL;
}
Code Example #10
File: test_dfs_visit.cpp Project: tttor/tor-cpp-ws
int
main()
{
  using namespace boost;
  //===========================================================================
  // Declare the graph type and object, and some property maps.

  typedef adjacency_list<vecS, vecS, directedS, property<vertex_name_t, std::string, property<vertex_color_t, default_color_type> >, property<edge_name_t, std::string, property<edge_weight_t, int> > > Graph;
  
  typedef graph_traits<Graph> Traits;
  typedef Traits::vertex_descriptor Vertex;
  typedef Traits::edge_descriptor Edge;
  typedef Traits::vertices_size_type size_type;

  typedef std::map<std::string, Vertex> NameVertexMap;
  NameVertexMap name2vertex;
  Graph g;

  typedef property_map<Graph, vertex_name_t>::type NameMap;
  property_map<Graph, edge_name_t>::type link_name = get(edge_name, g);

  //===========================================================================
  // Read the data file and construct the graph.
  
  Edge edge;
  bool inserted;      
  
  tie(edge, inserted) = add_edge(0,1,g);
  put(vertex_name, g, source(edge,g),"0");
  put(vertex_name, g, target(edge,g),"1");
    
  tie(edge, inserted) = add_edge(1,2,g);
  put(vertex_name, g, source(edge,g),"1");
  put(vertex_name, g, target(edge,g),"2");
    
  tie(edge, inserted) = add_edge(1,3,g);
  put(vertex_name, g, source(edge,g),"1");  
  put(vertex_name, g, target(edge,g),"3");

  tie(edge, inserted) = add_edge(3,4,g);
  put(vertex_name, g, source(edge,g),"3");  
  put(vertex_name, g, target(edge,g),"4");  
  
  tie(edge, inserted) = add_edge(3,5,g);
  put(vertex_name, g, source(edge,g),"3");  
  put(vertex_name, g, target(edge,g),"5"); 
  
  tie(edge, inserted) = add_edge(2,4,g);
  put(vertex_name, g, source(edge,g),"2");  
  put(vertex_name, g, target(edge,g),"4");   

  tie(edge, inserted) = add_edge(4,6,g);
  put(vertex_name, g, source(edge,g),"4");  
  put(vertex_name, g, target(edge,g),"6");   
  //===========================================================================

  std::vector< std::vector<Edge> > paths; 
  
  Graph tmp_g = g;
  Vertex src = 0;  
  Vertex goal = 6;
  bool more = true;

  do
  {
    std::vector<Edge> path;
    OptSolPathEstimator<Graph> ospe(goal,&path);  
    
    try
    {
      depth_first_visit(tmp_g,src,ospe,get(vertex_color,tmp_g));
    }
    catch(FoundGoalSignal fgs)
    { };
    
    if( !path.empty() )
    {
      more = true;// possible there are more goal-reached paths
      paths.push_back(path);
      
      // Delete the last edge of path in the tmp_g
      remove_edge(path.back(),tmp_g);
      
      // Reset all vertex_color in tmp_g to white
      boost::graph_traits<Graph>::vertex_iterator vi, vi_end;
      for (boost::tie(vi,vi_end) = vertices(tmp_g); vi != vi_end; ++vi)
        put(vertex_color,tmp_g,*vi,color_traits<boost::default_color_type>::white());
    }
    else
    {
      more = false;
    }
  }
  while(more);

  cerr << "paths.size()= "  << paths.size() << endl;
  for(typename std::vector< std::vector<Edge> >::const_iterator i=paths.begin(); i!=paths.end(); ++i)
  {
    cerr << "Path " << i-paths.begin() << endl;
    for(std::vector<Edge>::const_iterator j=i->begin(); j!=i->end(); ++j)
    {
      cout << "e(" << source(*j,g) << "," << target(*j,g) << "), ";
    }
    cout << endl;
  } 

//  boost::graph_traits<Graph>::vertex_iterator vi, vi_end;
//  for (boost::tie(vi,vi_end) = vertices(g); vi != vi_end; ++vi)
//  {
//    if( get(vertex_color,g,*vi)==color_traits<boost::default_color_type>::gray() )
//      cerr << *vi << "= gray" << endl;
//    else if( get(vertex_color,g,*vi)==color_traits<boost::default_color_type>::black() )
//      cerr << *vi << "= black" << endl;
//    else 
//      cerr << *vi << "= white" << endl;
//  }   
  
//  cerr << "============================================================================================" << endl;
//  for (boost::tie(vi,vi_end) = vertices(tmp_g); vi != vi_end; ++vi)
//  {
//    if( get(vertex_color,tmp_g,*vi)==color_traits<boost::default_color_type>::gray() )
//      cerr << *vi << "= gray" << endl;
//    else if( get(vertex_color,tmp_g,*vi)==color_traits<boost::default_color_type>::black() )
//      cerr << *vi << "= black" << endl;
//    else 
//      cerr << *vi << "= white" << endl;
//  }   
//    
//     
//  cerr << "============================================================================================" << endl;
//    
//  src = 1;
//  paths.clear();
//  
//  depth_first_visit(g,src,ospe,get(vertex_color, g));
// 
//  cerr << "paths.size()= "  << paths.size() << endl;
//  for(typename std::vector< std::vector<Edge> >::const_iterator i=paths.begin(); i!=paths.end(); ++i)
//  {
//    cerr << "Path " << i-paths.begin() << endl;
//    for(std::vector<Edge>::const_iterator j=i->begin(); j!=i->end(); ++j)
//    {
//      cout << "e(" << source(*j,g) << "," << target(*j,g) << "), ";
//    }
//    cout << endl;
//  } 
  
  return EXIT_SUCCESS;
}
Code Example #11
File: sloan_ordering.hpp Project: 00liujj/dealii
  OutputIterator
  sloan_ordering(Graph& g,
                 typename graph_traits<Graph>::vertex_descriptor s,
                 typename graph_traits<Graph>::vertex_descriptor e,
                 OutputIterator permutation, 
                 ColorMap color, 
                 DegreeMap degree, 
                 PriorityMap priority, 
                 Weight W1, 
                 Weight W2)
  {
    //typedef typename property_traits<DegreeMap>::value_type Degree;
    typedef typename property_traits<PriorityMap>::value_type Degree;
    typedef typename property_traits<ColorMap>::value_type ColorValue;
    typedef color_traits<ColorValue> Color;
    typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
    typedef typename std::vector<typename graph_traits<Graph>::vertices_size_type>::iterator vec_iter;
    typedef typename graph_traits<Graph>::vertices_size_type size_type;

    typedef typename property_map<Graph, vertex_index_t>::const_type VertexID;

    
    //Creating a std-vector for storing the distance from the end vertex in it
    typename std::vector<typename graph_traits<Graph>::vertices_size_type> dist(num_vertices(g), 0);
    
    //Wrap a property_map_iterator around the std::iterator
    boost::iterator_property_map<vec_iter, VertexID, size_type, size_type&> dist_pmap(dist.begin(), get(vertex_index, g)); 
    
    breadth_first_search
      (g, e, visitor
       (
           make_bfs_visitor(record_distances(dist_pmap, on_tree_edge() ) )
        )
       );
    
    //Creating a property_map for the indices of a vertex
    typename property_map<Graph, vertex_index_t>::type index_map = get(vertex_index, g);
    
    //Sets the color and priority to their initial status
    unsigned cdeg;    
    typename graph_traits<Graph>::vertex_iterator ui, ui_end;
    for (boost::tie(ui, ui_end) = vertices(g); ui != ui_end; ++ui)
    {
        put(color, *ui, Color::white());
        cdeg=get(degree, *ui)+1;
        put(priority, *ui, W1*dist[index_map[*ui]]-W2*cdeg );  
    }
    
    //Priority list
    typedef indirect_cmp<PriorityMap, std::greater<Degree> > Compare;
    Compare comp(priority);
    std::list<Vertex> priority_list;

    //Some more declarations
    typename graph_traits<Graph>::out_edge_iterator ei, ei_end, ei2, ei2_end;
    Vertex u, v, w;

    put(color, s, Color::green());      //Sets the color of the starting vertex to green (preactive)
    priority_list.push_front(s);                 //Puts s into the priority_list
    
    while ( !priority_list.empty() ) 
    {  
      priority_list.sort(comp);         //Orders the elements in the priority list by decreasing priority (highest first)
      
      u = priority_list.front();           //Accesses the highest-priority element in the priority list
      priority_list.pop_front();               //Removes that element from the priority list
      
      if(get(color, u) == Color::green() )
      {
        //for-loop over all out-edges of vertex u
        for (boost::tie(ei, ei_end) = out_edges(u, g); ei != ei_end; ++ei) 
        {
          v = target(*ei, g);
          
          put( priority, v, get(priority, v) + W2 ); //updates the priority
          
          if (get(color, v) == Color::white() )      //test if the vertex is inactive
          {
            put(color, v, Color::green() );        //giving the vertex a preactive status
            priority_list.push_front(v);                     //writing the vertex in the priority_queue
          }           
        }
      }
      
      //Here starts step 8
      *permutation++ = u;                      //Puts u to the first position in the permutation-vector
      put(color, u, Color::black() );          //Gives u an inactive status
      
      //for loop over all the adjacent vertices of u
      for (boost::tie(ei, ei_end) = out_edges(u, g); ei != ei_end; ++ei) {
        
        v = target(*ei, g);     
        
        if (get(color, v) == Color::green() ) {      //tests if the vertex is inactive
          
          put(color, v, Color::red() );        //giving the vertex an active status
          put(priority, v, get(priority, v)+W2);  //updates the priority        
          
          //for loop over alll adjacent vertices of v
          for (boost::tie(ei2, ei2_end) = out_edges(v, g); ei2 != ei2_end; ++ei2) {
            w = target(*ei2, g);
            
            if(get(color, w) != Color::black() ) {     //tests if vertex is postactive
              
              put(priority, w, get(priority, w)+W2);  //updates the priority
              
              if(get(color, w) == Color::white() ){
                
                put(color, w, Color::green() );   // gives the vertex a preactive status
                priority_list.push_front(w);           // puts the vertex into the priority queue
                
              } //end if
              
            } //end if
            
          } //end for
          
        } //end if
        
      } //end for
      
    } //end while
    
    
    return permutation;
  }  
Code Example #12
  void FindPeaksMD::findPeaks(typename MDEventWorkspace<MDE, nd>::sptr ws)
  {
    if (nd < 3)
      throw std::invalid_argument("Workspace must have at least 3 dimensions.");

    progress(0.01, "Refreshing Centroids");

    // TODO: This might be slow, progress report?
    // Make sure all centroids are fresh
    ws->getBox()->refreshCentroid();

    typedef IMDBox<MDE,nd>* boxPtr;

    if (ws->getNumExperimentInfo() == 0)
      throw std::runtime_error("No instrument was found in the MDEventWorkspace. Cannot find peaks.");

    // TODO: Do we need to pick a different instrument info?
    ExperimentInfo_sptr ei = ws->getExperimentInfo(0);
    // Instrument associated with workspace
    Geometry::Instrument_const_sptr inst = ei->getInstrument();
    // Find the run number
    int runNumber = ei->getRunNumber();

    // Check that the workspace dimensions are in Q-sample-frame or Q-lab-frame.
    eDimensionType dimType;

    std::string dim0 = ws->getDimension(0)->getName();
    if (dim0 == "H")
    {
      dimType = HKL;
      throw std::runtime_error("Cannot find peaks in a workspace that is already in HKL space.");
    }
    else if (dim0 == "Q_lab_x")
    {
      dimType = QLAB;
    }
    else if (dim0 == "Q_sample_x")
      dimType = QSAMPLE;
    else
      throw std::runtime_error("Unexpected dimensions: need either Q_lab_x or Q_sample_x.");

    // Find the goniometer rotation matrix
    Mantid::Kernel::Matrix<double> goniometer(3,3, true); // Default IDENTITY matrix
    try
    {
      goniometer = ei->mutableRun().getGoniometerMatrix();
    }
    catch (std::exception & e)
    {
      g_log.warning() << "Error finding goniometer matrix. It will not be set in the peaks found." << std::endl;
      g_log.warning() << e.what() << std::endl;
    }

    /// Arbitrary scaling factor for density to make more manageable numbers, especially for older file formats.
    signal_t densityScalingFactor = 1e-6;

    // Calculate a threshold below which a box is too diffuse to be considered a peak.
    signal_t thresholdDensity = 0.0;
    thresholdDensity = ws->getBox()->getSignalNormalized() * DensityThresholdFactor * densityScalingFactor;
    g_log.notice() << "Threshold signal density: " << thresholdDensity << std::endl;

    // We will fill this vector with pointers to all the boxes (up to a given depth)
    typename std::vector<boxPtr> boxes;

    // Get all the MDboxes
    progress(0.10, "Getting Boxes");
    ws->getBox()->getBoxes(boxes, 1000, true);



    // TODO: Here keep only the boxes > e.g. 3 * mean.
    typedef std::pair<double, boxPtr> dens_box;

    // Map that will sort the boxes by increasing density. The key = density; value = box *.
    typename std::multimap<double, boxPtr> sortedBoxes;

    progress(0.20, "Sorting Boxes by Density");
    typename std::vector<boxPtr>::iterator it1;
    typename std::vector<boxPtr>::iterator it1_end = boxes.end();
    for (it1 = boxes.begin(); it1 != it1_end; it1++)
    {
      boxPtr box = *it1;
      double density = box->getSignalNormalized() * densityScalingFactor;
      // Skip any boxes with too small a signal density.
      if (density > thresholdDensity)
        sortedBoxes.insert(dens_box(density,box));
    }

    // List of chosen possible peak boxes.
    std::vector<boxPtr> peakBoxes;

    prog = new Progress(this, 0.30, 0.95, MaxPeaks);

    int64_t numBoxesFound = 0;
    // Now we go (backwards) through the map
    // e.g. from highest density down to lowest density.
    typename std::multimap<double, boxPtr>::reverse_iterator it2;
    typename std::multimap<double, boxPtr>::reverse_iterator it2_end = sortedBoxes.rend();
    for (it2 = sortedBoxes.rbegin(); it2 != it2_end; it2++)
    {
      signal_t density = it2->first;
      boxPtr box = it2->second;
#ifndef MDBOX_TRACK_CENTROID
      coord_t boxCenter[nd];
      box->calculateCentroid(boxCenter);
#else
      const coord_t * boxCenter = box->getCentroid();
#endif

      // Compare to all boxes already picked.
      bool badBox = false;
      for (typename std::vector<boxPtr>::iterator it3=peakBoxes.begin(); it3 != peakBoxes.end(); it3++)
      {

#ifndef MDBOX_TRACK_CENTROID
        coord_t otherCenter[nd];
        (*it3)->calculateCentroid(otherCenter);
#else
        const coord_t * otherCenter = (*it3)->getCentroid();
#endif

        // Distance between this box and a box we already put in.
        coord_t distSquared = 0.0;
        for (size_t d=0; d<nd; d++)
        {
          coord_t dist = otherCenter[d] - boxCenter[d];
          distSquared += (dist * dist);
        }

        // Reject this box if it is too close to another previously found box.
        if (distSquared < peakRadiusSquared)
        {
          badBox = true;
          break;
        }
      }

      // The box was not rejected for another reason.
      if (!badBox)
      {
        if (numBoxesFound++ >= MaxPeaks)
        {
          g_log.notice() << "Number of peaks found exceeded the limit of " << MaxPeaks << ". Stopping peak finding." << std::endl;
          break;
        }

        peakBoxes.push_back(box);
        g_log.information() << "Found box at ";
        for (size_t d=0; d<nd; d++)
          g_log.information() << (d>0?",":"") << boxCenter[d];
        g_log.information() << "; Density = " << density << std::endl;
        // Report progress for each box found.
        prog->report("Finding Peaks");
      }
    }

    prog->resetNumSteps(numBoxesFound, 0.95, 1.0);

    // Copy the instrument, sample, run to the peaks workspace.
    peakWS->copyExperimentInfoFrom(ei.get());

    // --- Convert the "boxes" to peaks ----
    for (typename std::vector<boxPtr>::iterator it3=peakBoxes.begin(); it3 != peakBoxes.end(); it3++)
    {
      // The center of the box = Q in the lab frame
      boxPtr box = *it3;
#ifndef MDBOX_TRACK_CENTROID
      coord_t boxCenter[nd];
      box->calculateCentroid(boxCenter);
#else
      const coord_t * boxCenter = box->getCentroid();
#endif

      V3D Q(boxCenter[0], boxCenter[1], boxCenter[2]);

      // Create a peak and add it
      // Empty starting peak.
      Peak p;
      try
      {
        if (dimType == QLAB)
        {
          // Build using the Q-lab-frame constructor
          p = Peak(inst, Q);
          // Save gonio matrix for later
          p.setGoniometerMatrix(goniometer);
        }
        else if (dimType == QSAMPLE)
        {
          // Build using the Q-sample-frame constructor
          p = Peak(inst, Q, goniometer);
        }
      }
      catch (std::exception &e)
      {
        g_log.notice() << "Error creating peak at " << Q << " because of '" << e.what() << "'. Peak will be skipped." << std::endl;
        continue;
      }

      try
      { // Look for a detector
        p.findDetector();
      }
      catch (...)
      { /* Ignore errors in ray-tracer TODO: Handle for WISH data later */ }

      // The "bin count" used will be the box density.
      p.setBinCount( box->getSignalNormalized() * densityScalingFactor);

      // Save the run number found before.
      p.setRunNumber(runNumber);

      peakWS->addPeak(p);

      // Report progress for each box found.
      prog->report("Adding Peaks");

    } // for each box found

  }
Code Example #13
template <  typename IN_PORT_TYPE, typename OUT_PORT_TYPE > int randomizer_base::_transformerServiceFunction( typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams ,
    typename  std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams  )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;
    typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > >  _OStreamList;

    boost::mutex::scoped_lock lock(serviceThreadLock);

    if ( validGRBlock() == false ) {

        // create our processing block, and setup  property notifiers
        createBlock();

        LOG_DEBUG( randomizer_base, " FINISHED BUILDING  GNU RADIO BLOCK");
    }
 
    //process any Stream ID changes this could affect number of io streams
    processStreamIdChanges();

    if ( !validGRBlock() || istreams.size() == 0 || ostreams.size() == 0  ) {
        LOG_WARN( randomizer_base, "NO STREAMS ATTACHED TO BLOCK..." );
        return NOOP;
    }

    _input_ready.resize( istreams.size() );
    _ninput_items_required.resize( istreams.size() );
    _ninput_items.resize( istreams.size() );
    _input_items.resize( istreams.size() );
    _output_items.resize( ostreams.size() );

    //
    // RESOLVE: need to look at forecast strategy, 
    //    1)  see how many read items are necessary for N number of outputs
    //    2)  read input data and see how much output we can produce
    //

    //
    // Grab available data from input streams
    //
    typename _OStreamList::iterator ostream;
    typename _IStreamList::iterator istream = istreams.begin();
    int nitems=0;
    for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
        // note this a blocking read that can cause deadlocks
        nitems = istream->read();
    
        if ( istream->overrun() ) {
            LOG_WARN( randomizer_base, " NOT KEEPING UP WITH STREAM ID:" << istream->streamID );
        }

        if ( istream->sriChanged() ) {
            // RESOLVE - need to look at how SRI changes can affect Gnu Radio BLOCK state
            LOG_DEBUG( randomizer_base, "SRI CHANGED, STREAMD IDX/ID: " 
                      << idx << "/" << istream->pkt->streamID );
            setOutputStreamSRI( idx, istream->pkt->SRI );
        }
    }

    LOG_TRACE( randomizer_base, "READ NITEMS: "  << nitems );
    if ( nitems <= 0 && !_istreams[0].eos() ) {
        return NOOP;
    }

    bool eos = false;
    int  nout = 0;
    bool workDone = false;

    while ( nout > -1 && serviceThread->threadRunning() ) {
        eos = false;
        nout = _forecastAndProcess( eos, istreams, ostreams );
        if ( nout > -1  ) {
            workDone = true;

            // we chunked on data so move read pointer..
            istream = istreams.begin();
            for ( ; istream != istreams.end(); istream++ ) {
                int idx=std::distance( istreams.begin(), istream );
                // if we processed data for this stream
                if ( _input_ready[idx] ) {
                    size_t nitems = 0;
                    try {
                        nitems = gr_sptr->nitems_read( idx );
                    } catch(...){}
      
                    if ( nitems > istream->nitems() ) {
                        LOG_WARN( randomizer_base,  "WORK CONSUMED MORE DATA THAN AVAILABLE,  READ/AVAILABLE "
                                 << nitems << "/" << istream->nitems() );
                        nitems = istream->nitems();
                    }
                    istream->consume( nitems );
                    LOG_TRACE( randomizer_base, " CONSUME READ DATA  ITEMS/REMAIN " << nitems << "/" << istream->nitems());
                }
            }
            gr_sptr->reset_read_index();
        }

        // check for not enough data return
        if ( nout == -1 ) {

            // check for  end of stream
            istream = istreams.begin();
            for ( ; istream != istreams.end() ; istream++) {
                if ( istream->eos() ) {
                    eos=true;
                }
            }
            if ( eos ) {
                LOG_TRACE(  randomizer_base, "EOS SEEN, SENDING DOWNSTREAM " );
                _forecastAndProcess( eos, istreams, ostreams);
            }
        }
    }

    if ( eos ) {
        istream = istreams.begin();
        for ( ; istream != istreams.end() ; istream++ ) {
            int idx=std::distance( istreams.begin(), istream );
            LOG_DEBUG( randomizer_base, " CLOSING INPUT STREAM IDX:" << idx );
            istream->close();
        }

        // close remaining output streams
        ostream = ostreams.begin();
        for ( ; eos && ostream != ostreams.end(); ostream++ ) {
            int idx=std::distance( ostreams.begin(), ostream );
            LOG_DEBUG( randomizer_base, " CLOSING OUTPUT STREAM IDX:" << idx );
            ostream->close();
        }
    }

    //
    // set the read pointers of the GNU Radio Block to start at the beginning of the 
    // supplied buffers
    //
    gr_sptr->reset_read_index();

    LOG_TRACE( randomizer_base, " END OF TRANSFORM SERVICE FUNCTION....." << noutput_items );

    if ( nout == -1 && eos == false && !workDone ) {
        return NOOP;
    } else {
        return NORMAL;
    }
}
Code Example #14
File: utils.hpp Project: ParBLiSS/metag_partitioning
void generatePartitionSizeHistogram(typename std::vector<T>& localVector, std::string filename, MPI_Comm comm = MPI_COMM_WORLD)
{
  /*
   * Approach :
   * 1. Do a global sort by keyLayer (Partition id).
   * 2. Compute the information Partition_Id:Size for boundary partitions (Avoid duplication).
   * 3. Do an all_gather of boundary values plugged with ranks.
   * 4. All boundary partitions should be owned by minimum rank that shares it.
   * 5. Locally compute the largest partition size and do MPI_Allreduce 
   *    with MPI_MAX.
   * 6. Build the histogram locally. 
   * 7. Finally do MPI_Reduce over whole histogram to root node.
   * 8. Write the output to file
   */

  int p;
  int rank;
  MPI_Comm_size(comm, &p);
  MPI_Comm_rank(comm, &rank);

  static layer_comparator<keyLayer, T> pccomp;

  //Sort the vector by each tuple's keyLayer element
  mxx::sort(localVector.begin(), localVector.end(), pccomp, comm, false);

  //Iterate over tuples to compute boundary information
  
  bool iownLeftBucket, iownRightBucket, onlySingleLocalPartition;
  //Type of tuple to communicate : (Partition Id, rank, size)
  typedef std::tuple<uint32_t, int, uint64_t> tupletypeforbucketSize;
  std::vector<tupletypeforbucketSize> toSend;
  toSend.resize(2);

  //Find the left most bucket
  auto leftBucketRange = findRange(localVector.begin(), localVector.end(), *(localVector.begin()), pccomp);
  std::get<0>(toSend[0]) = std::get<keyLayer>(*localVector.begin()); 
  std::get<1>(toSend[0]) = rank;
  std::get<2>(toSend[0]) = leftBucketRange.second - leftBucketRange.first;

  //Find the right most bucket
  auto rightBucketRange = findRange(localVector.rbegin(), localVector.rend(), *(localVector.rbegin()), pccomp);
  std::get<0>(toSend[1]) = std::get<keyLayer>(*localVector.rbegin()); 
  std::get<1>(toSend[1]) = rank;
  std::get<2>(toSend[1]) = rightBucketRange.second - rightBucketRange.first;

  //If we have only single partition, make sure we are not creating duplicates
  if(std::get<0>(toSend[0]) == std::get<0>(toSend[1]))
  {
    //Make second send element's size zero
    std::get<2>(toSend[1]) = 0;
    onlySingleLocalPartition = true;
  }
  else
    onlySingleLocalPartition = false;

  //Gather all the boundary information
  auto allBoundaryPartitionSizes = mxx::allgather_vectors(toSend, comm);

  uint64_t leftBucketSize = 0;
  uint64_t rightBucketSize = 0;

  //Need to parse boundary information that matches the partitionIds we have
  static layer_comparator<0, tupletypeforbucketSize> pccomp2;
  auto leftBucketBoundaryRange = std::equal_range(allBoundaryPartitionSizes.begin(), allBoundaryPartitionSizes.end(), toSend[0], pccomp2);

  //Check if this processor owns this bucket
  if(std::get<1>(*(leftBucketBoundaryRange.first)) == rank)
  {
    iownLeftBucket = true;
    for(auto it = leftBucketBoundaryRange.first; it != leftBucketBoundaryRange.second; it++)
    {
      leftBucketSize += std::get<2>(*it);
    }
  }
  else
    iownLeftBucket = false;

  auto rightBucketBoundaryRange = std::equal_range(allBoundaryPartitionSizes.begin(), allBoundaryPartitionSizes.end(), toSend[1], pccomp2);

  //Check if this processor owns right partition
  if(std::get<1>(*rightBucketBoundaryRange.first) == rank && !onlySingleLocalPartition)
  {
    iownRightBucket = true;
    for(auto it = rightBucketBoundaryRange.first; it != rightBucketBoundaryRange.second; it++)
    {
      rightBucketSize += std::get<2>(*it);
    }
  }
  else
    iownRightBucket = false;

  //Map from partition size to count 
  typedef std::map <uint64_t, uint32_t> MapType;
  MapType localHistMap;

  for(auto it = localVector.begin(); it!= localVector.end();)  // iterate over all segments.
  {
    auto innerLoopBound = findRange(it, localVector.end(), *it, pccomp);

    //Left most bucket
    if (innerLoopBound.first == localVector.begin()) // first
    {
      if(iownLeftBucket)
        insertToHistogram(localHistMap, leftBucketSize);
    }
    //Right most bucket
    else if (innerLoopBound.second == localVector.end()) // first
    {
      if(iownRightBucket)
        insertToHistogram(localHistMap, rightBucketSize);
    }
    //Inner buckets
    else
    {
      insertToHistogram(localHistMap, innerLoopBound.second - innerLoopBound.first);
    }

    it = innerLoopBound.second;
  }

  //Convert map to vector
  using tupleTypeforHist = std::tuple<uint64_t, uint32_t>;
  std::vector<tupleTypeforHist> localHistVector;

  for(MapType::iterator it = localHistMap.begin(); it != localHistMap.end(); ++it ) 
  {
    localHistVector.push_back(std::make_tuple(it->first, it->second));
  }

  //Gather vector from all processors to root
  auto globalHistVector = mxx::gather_vectors(localHistVector, comm);
  static layer_comparator<0, tupleTypeforHist> partition_size_cmp;

  //Write to file
  if(rank == 0)
  {
    //Sort the vector to bring counts with same size adjacent (default comparator will work)
    std::sort(globalHistVector.begin(), globalHistVector.end());

    std::ofstream ofs;
    ofs.open(filename, std::ios_base::out);

    //Iterate over the global vector
    for(auto it = globalHistVector.begin(); it != globalHistVector.end();)
    {
      //Range of counts belonging to same partition size
      auto innerLoopRange = findRange(it, globalHistVector.end(), *it, partition_size_cmp);
      auto p_size = std::get<0>(*it);
      uint32_t sum = 0;

      //Loop over the range
      for(auto it2 = innerLoopRange.first; it2 != innerLoopRange.second; it2++)
        sum += std::get<1>(*it2);

      ofs << p_size << " " << sum <<"\n";
      it = innerLoopRange.second;
    }

    ofs.close();

  }
}
Code Example #15
			void fromPointCloud( const typename std::vector<Celer::Vector4<Real> >& points,
			                     const Celer::Vector3<Real>& first_basis,
			                     const Celer::Vector3<Real>& second_basis,
			                     const Celer::Vector3<Real>& third_basis )
			{
				// Reference :
				// Geometric Tools, LLC
				// Copyright (c) 1998-2012
				// Distributed under the Boost Software License, Version 1.0.
				// Wm5ContBox3.cpp

				basis_[0] = first_basis;
				basis_[1] = second_basis;
				basis_[2] = third_basis;


				for ( typename std::vector<Celer::Vector4<Real> >::const_iterator new_point = points.begin(); new_point != points.end(); new_point++)
				{

					 BoundingBox3<Real> box( std::min ( min_.x , new_point->x ) ,
								 std::min ( min_.y , new_point->y ) ,
								 std::min ( min_.z , new_point->z ) ,
								 std::max ( max_.x , new_point->x ) ,
								 std::max ( max_.y , new_point->y ) ,
								 std::max ( max_.z , new_point->z ) );

//						std::cout << min( );
//						std::cout << max( );
//						std::cout << " new_point " << *new_point;

					 *this = *this + box;
				}

				Celer::Vector3<Real> diff = points[0].toVector3() - this->center();
				Celer::Vector3<Real> pmin ( ( diff * basis_[0] ) , ( diff * basis_[1] ) , (diff * basis_[2]) );
				Celer::Vector3<Real> pmax = pmin;


				for ( typename std::vector<Celer::Vector4<Real> >::const_iterator new_point = points.begin(); new_point != points.end(); new_point++)
				{
//					diff = points[i] - box.Center;
//					for ( int j = 0; j < 3; ++j )
//					{
//						Real dot = diff.Dot ( box.Axis[j] );
//						if ( dot < pmin[j] )
//						{
//							pmin[j] = dot;
//						}
//						else if ( dot > pmax[j] )
//						{
//							pmax[j] = dot;
//						}
//					}

				}


//				for ( int i = 1; i < numPoints; ++i )
//				{
//				}

//				min_ += ( ( (Real) 0.5 ) * ( pmin[0] + pmax[0] ) ) * box.Axis[0] + ( ( (Real) 0.5 ) * ( pmin[1] + pmax[1] ) ) * box.Axis[1] + ( ( (Real) 0.5 ) * ( pmin[2] + pmax[2] ) ) * box.Axis[2];

			}
Code Example #16
template <  typename IN_PORT_TYPE > int file_descriptor_sink_i_base::_forecastAndProcess( bool &eos, typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;

    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( file_descriptor_sink_i_base, "GET MAX ITEMS: STREAM:" << idx << " NITEMS/SCALARS:"
                      << istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        //
        // calc number of output items to produce
        //
        noutput_items = (int) (max_items_avail * gr_sptr->relative_rate ());
        noutput_items = round_down (noutput_items, gr_sptr->output_multiple ());

        if ( noutput_items <= 0  ) {
           LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" <<   noutput_items << "/" << max_items_avail);
           return -1;
        }

        if ( gr_sptr->fixed_rate() ) {
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( file_descriptor_sink_i_base, " FIXED FORECAST NOUTPUT/output_multiple == " 
                      << noutput_items  << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t  outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple  ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);

            LOG_TRACE( file_descriptor_sink_i_base, "--> FORECAST IN/OUT " << _ninput_items_required[0]  << "/" << noutput_items  );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( file_descriptor_sink_i_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" << 
                          istream->nelems() << "/" << istream->nitems() << "/" << 
                          _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }
    
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( file_descriptor_sink_i_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" 
                      <<   _istreams[0].nitems() << "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int  ritems = 0;
        int  nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();
        istream = istreams.begin();

        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) continue;
      
            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( file_descriptor_sink_i_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" <<   istream->streamID );
                return -2;
            } 

            nitems = istream->nitems() - ritems;
            LOG_TRACE( file_descriptor_sink_i_base,  " ISTREAM: IDX:" << idx  << " ITEMS AVAIL/READ/REQ " << nitems << "/" 
                      << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( file_descriptor_sink_i_base, " CALLING WORK.....N_OUT:" << noutput_items << 
                      " N_IN:" << nitems << " ISTREAMS:" << _input_items.size() << 
                      " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);

            // sink/analyzer patterns do not return items, so consume_each is not called in Gnu Radio BLOCK
            if ( nout == 0 ) {
                gr_sptr->consume_each(nitems);
            }

            LOG_TRACE( file_descriptor_sink_i_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( file_descriptor_sink_i_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    return nout;
 
}
Code Example #17
void VisusIndexedData::addText(XMLNode& node, const std::vector<std::vector<T > >* items) const
{
  // Count max number of items
  int maxitems = 0;
  for (typename std::vector<std::vector<T> >::const_iterator iiter=items->begin(); 
       iiter!=items->end(); ++iiter) 
      maxitems += iiter->size() + 1;

  // Construct contiguous buffer
  const int bufsize = sizeof(T) * maxitems;
  unsigned char* buffer = new unsigned char[bufsize+1];

  node.addAttribute("numItems", (long) items->size());
  node.addAttribute("bufsize", bufsize);

  int position = 0;
  // Copy non-contiguous data into contiguous buffer
  for (typename std::vector<std::vector<T> >::const_iterator iiter=items->begin(); 
       iiter!=items->end(); ++iiter) 
  {
    // Save this row's item count (getText reads it back to size each inner vector)
    T value = iiter->size();
    memcpy(&buffer[position], &value, sizeof(T));
    position += sizeof(T);

    // Load Vector's Vector of items into buffer
    for (typename std::vector<T>::const_iterator idtIter=iiter->begin(); 
       idtIter!=iiter->end(); ++idtIter) 
    {
       vassert(position < bufsize);
       memcpy(&buffer[position], &(*idtIter), sizeof(T));
       position += sizeof(T);
    }
  }
  vassert(position == bufsize);

  // Save Buffer out to XML
  switch (VisusXMLInterface::sWriteXMLDataStorage)
  {
    case BASE64:
    {
      // Save data as BASE64
      XMLParserBase64Tool base64;
      XMLSTR encoded = base64.encode(buffer, bufsize);
      node.addText(encoded);
    }
    break;
    case EXTERNAL_FILE:
    {
      vwarning("saving data to external file is not yet supported");
    }
    break;
    case ASCII:
    {
      vwarning("saving data to external file is not yet supported");
    }
    break;
  }

  delete [] buffer;
}
Code Example #18
template <  typename IN_PORT_TYPE, typename OUT_PORT_TYPE > int randomizer_base::_forecastAndProcess( bool &eos, typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams ,
                                 typename  std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams  )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;
    typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > >  _OStreamList;

    typename _OStreamList::iterator ostream;
    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( randomizer_base, "GET MAX ITEMS: STREAM:"<< idx << " NITEMS/SCALARS:" << 
                       istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        if ( max_items_avail == 0  ) {
            LOG_TRACE( randomizer_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" <<   noutput_items << "/" << max_items_avail);
            return -1;
        }

        //
        // calc number of output elements based on input items available
        //
        noutput_items = 0;
        if ( !gr_sptr->fixed_rate() )  {
            noutput_items = round_down((int32_t) (max_items_avail * gr_sptr->relative_rate()), gr_sptr->output_multiple());
            LOG_TRACE( randomizer_base, " VARIABLE FORECAST NOUTPUT == " << noutput_items );
        } else {
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( randomizer_base,  " FIXED FORECAST NOUTPUT/output_multiple == " << 
                        noutput_items  << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t  outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple  ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);

            LOG_TRACE( randomizer_base, "--> FORECAST IN/OUT " << _ninput_items_required[0]  << "/" << noutput_items  );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( randomizer_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" <<   istream->nelems() << 
                          "/" << istream->nitems() << "/" << _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }
    
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( randomizer_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( randomizer_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" <<   _istreams[0].nitems() << 
                      "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int  ritems = 0;
        int  nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();
        istream = istreams.begin();

        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) {
                continue;
            }
            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( randomizer_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" <<   istream->streamID );
                return -2;
            } 
    
            nitems = istream->nitems() - ritems;
            LOG_TRACE( randomizer_base,  " ISTREAM: IDX:" << idx  << " ITEMS AVAIL/READ/REQ " << nitems << "/" 
                       << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        //
        // setup output buffer vector based on noutput..
        //
        ostream = ostreams.begin();
        for( ; ostream != ostreams.end(); ostream++ ) {
            ostream->resize(noutput_items);
            _output_items.push_back((void*)(ostream->write_pointer()) );
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( randomizer_base, " CALLING WORK.....N_OUT:" << noutput_items << " N_IN:" << nitems 
                      << " ISTREAMS:" << _input_items.size() << " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);
            LOG_TRACE( randomizer_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( randomizer_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    if (nout != 0 or eos ) {
        noutput_items = nout;
        LOG_TRACE( randomizer_base, " WORK RETURNED: NOUT : " << nout << " EOS:" << eos);
        ostream = ostreams.begin();
        typename IN_PORT_TYPE::dataTransfer *pkt=NULL;
        for ( int idx=0 ; ostream != ostreams.end(); idx++, ostream++ ) {

            pkt=NULL;
            int inputIdx = idx;
            if ( (size_t)(inputIdx) >= istreams.size() ) {
                for ( inputIdx= istreams.size()-1; inputIdx > -1; inputIdx--) {
                    if ( istreams[inputIdx].pkt != NULL ) {
                        pkt = istreams[inputIdx].pkt;
                        break;
                    }
                }
            } else {
                pkt = istreams[inputIdx].pkt;
            }

            LOG_TRACE( randomizer_base,  "PUSHING DATA   ITEMS/STREAM_ID " << ostream->nitems() << "/" << ostream->streamID );    
            if ( _maintainTimeStamp ) {

                // set time stamp for output samples based on input time stamp
                if ( ostream->nelems() == 0 )  {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "SEED - TS SRI:  xdelta:" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "OSTREAM WRITE:   maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  ostream->tstamp.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  ostream->tstamp.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  ostream->tstamp.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << ostream->tstamp.twsec );
      LOG_DEBUG( randomizer_base, "SEED - TS         frac:" <<  std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    ostream->setTimeStamp( pkt->T, _maintainTimeStamp );
                }

                // write out samples, and set next time stamp based on xdelta and  noutput_items
                ostream->write ( noutput_items, eos );
            } else {
// use incoming packet's time stamp to forward
                if ( pkt ) {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "OSTREAM  SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "PKT - TS         maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  pkt->T.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  pkt->T.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  pkt->T.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << pkt->T.twsec );
      LOG_DEBUG( randomizer_base, "PKT - TS          frac:" <<  std::setprecision(12) << pkt->T.tfsec );
#endif
                    ostream->write( noutput_items, eos, pkt->T  );
                } else {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "OSTREAM  SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "OSTREAM TOD      maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  ostream->tstamp.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  ostream->tstamp.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  ostream->tstamp.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << ostream->tstamp.twsec );
      LOG_DEBUG( randomizer_base, "OSTREAM TOD       frac:" <<  std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    // use time of day as time stamp
                    ostream->write( noutput_items, eos,  _maintainTimeStamp );
                }
            }

        } // for ostreams
    }

    return nout;     
}
Code Example #19
bool VisusIndexedData::getText(XMLNode& node, std::vector<std::vector<T > >* items, XMLDataStorage storageType)
{
  int numItems = xmltoi(node.getAttribute("numItems"));
  int bufsize  = xmltoi(node.getAttribute("bufsize"));
  unsigned char* buffer = new unsigned char[bufsize+1];

  switch (storageType)
  {
    case BASE64:
    {
      // Retrieve data as BASE64
      XMLParserBase64Tool base64;
      base64.decode(node.getText(), buffer, bufsize);
    }
    break;
    case EXTERNAL_FILE:
    {
      vwarning("saving data to external file is not yet supported");
      return false;
    }
    break;
    case ASCII:
    {
      vwarning("saving data to external file is not yet supported");
      return false;
    }
    break;
  }

  // Ensure vector is appropriate size
  if ((int)items->capacity() < numItems)
    items->resize(numItems);

  // Copy from contiguous buffer into non-contiguous data
  int position = 0;
  for (typename std::vector<std::vector<T> >::iterator iiter=items->begin(); 
       iiter!=items->end(); ++iiter) 
  {
    T value;
    memcpy(&value, &buffer[position], sizeof(T));
    position += sizeof(T);

    // Ensure item that is vector is appropriate size
    if ((int)iiter->capacity() < (long) value)
      iiter->resize((long)value);

    // Load up item that is vector
    for (typename std::vector<T>::iterator idtIter=iiter->begin(); 
       idtIter!=iiter->end(); ++idtIter) 
    {
       vassert(position < bufsize);
       memcpy(&(*idtIter), &buffer[position], sizeof(T));
       position += sizeof(T);
    }
  }
  vassert(position == bufsize);

  delete [] buffer;

  return true;
}
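Code Examples #17 and #19 flatten a vector<vector<T>> into one contiguous buffer: each row is written as its element count (stored as a T) followed by its elements, and getText reverses that layout. The sketch below reproduces just the packing and unpacking without the XML/Base64 wrapping; pack and unpack are hypothetical names, not part of VisusIndexedData.

#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>

// Pack rows as: [row length stored as T][row elements...] for each row.
template <typename T>
std::vector<unsigned char> pack(const std::vector<std::vector<T> >& items) {
  std::size_t count = 0;
  for (const std::vector<T>& row : items) count += row.size() + 1;
  std::vector<unsigned char> buf(count * sizeof(T));
  std::size_t pos = 0;
  for (const std::vector<T>& row : items) {
    T len = static_cast<T>(row.size());
    std::memcpy(&buf[pos], &len, sizeof(T)); pos += sizeof(T);
    for (const T& v : row) { std::memcpy(&buf[pos], &v, sizeof(T)); pos += sizeof(T); }
  }
  assert(pos == buf.size());
  return buf;
}

// Unpack: read a length, then that many elements, until the buffer is consumed.
template <typename T>
std::vector<std::vector<T> > unpack(const std::vector<unsigned char>& buf) {
  std::vector<std::vector<T> > items;
  std::size_t pos = 0;
  while (pos < buf.size()) {
    T len; std::memcpy(&len, &buf[pos], sizeof(T)); pos += sizeof(T);
    std::vector<T> row(static_cast<std::size_t>(len));
    for (T& v : row) { std::memcpy(&v, &buf[pos], sizeof(T)); pos += sizeof(T); }
    items.push_back(row);
  }
  return items;
}

int main() {
  std::vector<std::vector<double> > rows = {{1.5, 2.5}, {3.0}, {}};
  std::vector<std::vector<double> > back = unpack<double>(pack(rows));
  std::cout << (back == rows ? "round trip ok" : "mismatch") << '\n';
}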