Example #1
void
MsgProcessor::append( msg_ptr msg )
{
    if( QThread::currentThread() != thread() )
    {
        qDebug() << "reinvoking msgprocessor::append in correct thread, ie not" << QThread::currentThread();
        QMetaObject::invokeMethod( this, "append", Qt::QueuedConnection, Q_ARG(msg_ptr, msg) );
        return;
    }

    m_msgs.append( msg );
    m_msg_ready.insert( msg.data(), false );

    m_totmsgsize += msg->payload().length();

    if( m_mode & NOTHING )
    {
        //qDebug() << "MsgProcessor::NOTHING";
        handleProcessedMsg( msg );
        return;
    }

    QFuture<msg_ptr> fut = QtConcurrent::run(&MsgProcessor::process, msg, m_mode, m_threshold);
    QFutureWatcher<msg_ptr> * watcher = new QFutureWatcher<msg_ptr>; // note: the processed() slot is expected to delete this watcher (e.g. via sender()->deleteLater())
    connect( watcher, SIGNAL( finished() ),
             this, SLOT( processed() ),
             Qt::QueuedConnection );

    watcher->setFuture( fut );
}
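For the queued re-invocation in append() to work, the msg_ptr argument must be known to Qt's meta-type system, or invokeMethod() with Q_ARG will fail at runtime. A minimal sketch, assuming msg_ptr is a shared-pointer typedef (the real typedef and Msg type live elsewhere in this codebase):

// Hypothetical typedef; the real one is defined elsewhere.
typedef QSharedPointer<Msg> msg_ptr;
Q_DECLARE_METATYPE(msg_ptr)

// Register once at startup, before the first cross-thread call:
qRegisterMetaType<msg_ptr>("msg_ptr");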
Example #2
bool GuassianBlur::process(cv::Mat image)
{
    cv::Mat out;
    // cv::GaussianBlur requires positive, odd kernel dimensions when sigmaX/sigmaY are 0
    cv::GaussianBlur(image, out, cv::Size(settingsWidget->xKernel, settingsWidget->yKernel), 0, 0);
    emit processed(out);
    return true;
}
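Since both sigma arguments are 0 here, cv::GaussianBlur will throw unless each kernel dimension is positive and odd. A defensive sketch, using the same xKernel/yKernel values read from the settings widget:

#include <algorithm>

// Force positive, odd kernel sizes before blurring (sketch).
int kx = std::max(1, settingsWidget->xKernel) | 1;  // e.g. 4 -> 5
int ky = std::max(1, settingsWidget->yKernel) | 1;
cv::GaussianBlur(image, out, cv::Size(kx, ky), 0, 0);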
Example #3
void csPdfSearch::progressUpdate()
{
  const int p = qBound(0, _cntDone * 100 / _numToDo, 100);
  if( p != _lastProgress ) {
    emit processed(p);
  }
  _lastProgress = p;
}
Example #4
shared_ptr<Epetra_CrsMatrix> sparseCholesky(const Epetra_CrsMatrix &mat) {
  // Note: we assume the matrix mat is symmetric and positive-definite
  size_t size = mat.NumGlobalCols();
  if (mat.NumGlobalRows() != size)
    throw std::invalid_argument("sparseCholesky(): matrix must be square");

  int *rowOffsets = 0;
  int *colIndices = 0;
  double *values = 0;
  mat.ExtractCrsDataPointers(rowOffsets, colIndices, values);

  Epetra_SerialComm comm;
  Epetra_LocalMap rowMap(static_cast<int>(size), 0 /* index_base */, comm);
  Epetra_LocalMap columnMap(static_cast<int>(size), 0 /* index_base */, comm);
  shared_ptr<Epetra_CrsMatrix> result = boost::make_shared<Epetra_CrsMatrix>(
      Copy, rowMap, columnMap, mat.GlobalMaxNumEntries());

  arma::Mat<double> localMat;
  arma::Mat<double> localCholesky;
  std::vector<bool> processed(size, false);
  for (size_t r = 0; r < size; ++r) {
    if (processed[r])
      continue;
    int localSize = rowOffsets[r + 1] - rowOffsets[r];
    localMat.set_size(localSize, localSize);
    localMat.fill(0.);
    localCholesky.set_size(localSize, localSize);
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      for (int c = 0; c < localSize; ++c) {
        int col = colIndices[rowOffsets[row] + c];
        if (col != colIndices[rowOffsets[r] + c])
          throw std::invalid_argument("sparseCholesky(): matrix is not "
                                      "block-diagonal");
        localMat(s, c) = values[rowOffsets[row] + c];
      }
    }
    assert(arma::norm(localMat - localMat.t(), "fro") <
           1e-12 * arma::norm(localMat, "fro"));
    localCholesky = arma::chol(localMat); // localCholesky: U
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      processed[row] = true;
#ifndef NDEBUG
      int errorCode =
#endif
          result->InsertGlobalValues(row, s + 1 /* number of values */,
                                     localCholesky.colptr(s),
                                     colIndices + rowOffsets[r]);
      assert(errorCode == 0);
    }
  }
  result->FillComplete(columnMap, rowMap);

  return result;
}
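sparseCholesky() relies on Armadillo's convention that arma::chol(A) returns the upper-triangular factor U with A = U.t() * U. A minimal self-contained check of that convention:

#include <armadillo>
#include <cassert>

int main()
{
  arma::mat A = {{4.0, 2.0}, {2.0, 3.0}};  // symmetric positive-definite
  arma::mat U = arma::chol(A);             // upper-triangular by default
  assert(arma::norm(U.t() * U - A, "fro") < 1e-12);
  return 0;
}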
Example #5
void WorkspaceDevPurgeWorker::run()
{
  QRegExp FilterRegExp(m_BuildDirRegexStr);

  for (auto& WType : m_Selection)
  {
    for (auto& WItem : WType.second)
    {
      writeMessage();
      writeMessage("====== "+WItem.ID+" ======");

      QDir Dir(WItem.Path);
      Dir.setFilter(QDir::Dirs | QDir::NoDotAndDotDot);

      QFileInfoList SubDirs = Dir.entryInfoList();

      for (const auto& SubDir : SubDirs)
      {
        if (FilterRegExp.exactMatch(SubDir.fileName()))
        {
          openfluid::tools::Filesystem::removeDirectory(SubDir.absoluteFilePath().toStdString());

          if (openfluid::tools::Filesystem::isDirectory(SubDir.absoluteFilePath().toStdString()))
          {
            writeMessage(SubDir.fileName() + ": <font style='color: red;'>"+tr("Error")+"</font>");
            emit processed(WType.first,WItem.ID,"purge", false);
          }
          else
          {
            writeMessage(SubDir.fileName() + ": <font style='color: green;'>"+tr("Deleted")+"</font>");
            emit processed(WType.first,WItem.ID,"purge", true);
          }
        }
      }
      writeMessage();

      emit wareCompleted();
    }
  }
  emit finished();
}
Example #6
void GTcpFlowMgr::process(GPacket* packet) {
  long now = packet->ts_.tv_sec;
  if (checkInterval_ != 0 && now - lastCheckTick_ >= checkInterval_)
  {
    deleteOldFlowMaps(packet);
    lastCheckTick_ = now;
  }

  GIpHdr* ipHdr = packet->pdus_.findFirst<GIpHdr>();
  if (ipHdr == nullptr) return;

  GTcpHdr* tcpHdr = packet->pdus_.findFirst<GTcpHdr>();
  if (tcpHdr == nullptr) return;

  GFlow::TcpFlowKey key{ipHdr->sip(), tcpHdr->sport(), ipHdr->dip(), tcpHdr->dport()};

  FlowMap::iterator it = flowMap_.find(key);
  if (it == flowMap_.end()) {
    GFlow::Value* value = GFlow::Value::allocate(packet->ts_, GFlow::Value::Half, requestItems_.totalMemSize_);
    it = flowMap_.insert(key, value);
    key_ = const_cast<GFlow::TcpFlowKey*>(&it.key());
    value_ = value;
    emit _flowCreated(packet);

    GFlow::TcpFlowKey reverseKey = GFlow::TcpFlowKey(it.key()).reverse();
    FlowMap::iterator reverseIt = flowMap_.find(reverseKey);
    if (reverseIt != flowMap_.end()) {
      it.value()->state_ = GFlow::Value::Full;
      reverseIt.value()->state_ = GFlow::Value::Full;
    }
  } else {
    GFlow::Value* value = it.value();
    value->ts_ = packet->ts_;
  }

  if ((tcpHdr->flags() & (TH_RST | TH_FIN)) != 0) {
    GFlow::Value* value = it.value();
    GFlow::Value::State state = (tcpHdr->flags() & TH_RST) ? GFlow::Value::Rst : GFlow::Value::Fin;
    value->state_ = state;
    GFlow::TcpFlowKey reverseKey = GFlow::TcpFlowKey(it.key()).reverse();
    FlowMap::iterator reverseIt = flowMap_.find(reverseKey);
    if (reverseIt != flowMap_.end()) {
      reverseIt.value()->state_ = state;
    }
  }

  this->key_ = const_cast<GFlow::TcpFlowKey*>(&it.key());
  this->value_ = it.value();
  emit processed(packet);
}
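The pairing logic above assumes GFlow::TcpFlowKey::reverse() swaps source and destination so that the opposite direction of the same connection can be looked up. A hypothetical sketch of that helper (member names are guesses based on the key construction above; the real definition lives in GFlow):

// Hypothetical; actual field names in GFlow::TcpFlowKey may differ.
GFlow::TcpFlowKey GFlow::TcpFlowKey::reverse() const {
  return TcpFlowKey{dip, dport, sip, sport};
}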
Example #7
void PlotBuilderWorker::process()
{
    if(_function != nullptr) {
        for( ; // _valueCurrent keeps its value from the previous run
            (_valueCurrent <= _valueTo) && _manager->isProcessing();
            _valueCurrent += _step) {
//            QThread::msleep(10); // emulate slowness :)

            double y = _function->calculate(_valueCurrent);

            emit processed(_valueCurrent, y, getProgress());
        }

        if (_manager->isProcessing()) {
            emit finished();
        }
    }
}
Example #8
const MonoBuffer& ColorDetector::process(ColorBuffer& image)
{
	if (m_outBuffer.size() != image.size()) {
		m_outBuffer = MonoBuffer(image.size());
	}
	ColorBuffer::const_iterator itIn = image.begin();
	ColorBuffer::const_iterator itInEnd = image.end();
	MonoBuffer::iterator itOut = m_outBuffer.begin();
	MonoBuffer::iterator itOutEnd = m_outBuffer.end();
	for ( ;itIn != itInEnd && itOut != itOutEnd; ++itIn, ++itOut) {
		if (getDistance(*itIn) > m_threshold) {
			*itOut = 0xFF;
		} else {
			*itOut = 0;
		}
	}
	emit processed(m_outBuffer);
	return m_outBuffer;
}
Example #9
char *get_status ( int hosts_size )
{
    json_lib::Status json_val;
    std::map<std::string, int> total_status;
    std::map<std::string, std::map<std::string, int> > status;

    std::string processed ("processed_items");
    std::string synced ("synced_items");
    total_status[processed] = * ( uint64_t * ) total_status_addr;
    total_status[synced]    = * ( uint64_t * ) ( total_status_addr + sizeof (uint64_t ) );

    for ( int i = 0; i < hosts_size; i ++ )
    {
        std::map<std::string, int> single_status;
        single_status[processed]    = * ( uint64_t * ) total_status_addrs[i];
        single_status[synced]       = * ( uint64_t * ) ( total_status_addrs[i] + sizeof (uint64_t ) );
        status[hosts[i]]            = single_status;
    }

    json_val.total_status   = total_status;
    json_val.status         = status;

    std::string res;


    if ( ! json_lib::Serialize (json_val, res) )
    {
        return NULL;
    }

    size_t status_size = res.size ();
    char *status_addr   = ( char * ) malloc (status_size + 1);
    if ( ! status_addr )
    {
        return NULL;
    }
    memcpy (status_addr, res.c_str (), status_size);
    status_addr[status_size] = '\0';

    // the caller owns the returned buffer and must free() it
    return status_addr;
}
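get_status() hands ownership of the malloc'ed, NUL-terminated buffer to the caller. A minimal usage sketch, inside some caller:

#include <cstdio>
#include <cstdlib>

char *status = get_status(hosts_size);
if (status != NULL)
{
    puts(status);
    free(status);  // the caller must release the buffer
}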
Example #10
  void UpdateManager::replyFinished(QNetworkReply* reply)
  {
    // finished after this
    m_finished = true;

    if (reply){
      // don't delete here
      reply->deleteLater();

      m_error = (reply->error() != QNetworkReply::NoError);
      if (!m_error){

        // create xml document to read the response
        QDomDocument document;
        document.setContent(reply->readAll());
        QDomNodeList openstudioelements = document.elementsByTagName("openstudio");

        if (openstudioelements.size() > 0)
        {
          // Only process the first one for now
          QDomNodeList nodes = openstudioelements.at(0).childNodes();

          // all child nodes will be releases
          for(int i = 0; i < nodes.size(); ++i){
            QDomElement release = nodes.at(i).toElement();
            if (!release.isNull()){
              if (!checkRelease(release)){
                // break if not newer than current 
                break;
              }
            }
          }
        }
      }else{
        LOG(Error, "QNetworkReply " << reply->error());
      }
    }

    emit processed();
  }
Example #11
void planning_models::KinematicModel::buildGroups(const std::vector<GroupConfig>& group_configs)
{
  //the only thing tricky is dealing with subgroups
  std::vector<bool> processed(group_configs.size(), false);
  while(true) {
    //going to make passes until we can't do anything else
    bool added = false;
    for(unsigned int i = 0; i < group_configs.size(); i++) {
      if(!processed[i]) {
        //if we haven't processed, check and see if the dependencies are met yet
        bool all_subgroups_added = true;
        for(unsigned int j = 0; j < group_configs[i].subgroups_.size(); j++) {
          if(joint_model_group_map_.find(group_configs[i].subgroups_[j]) ==
             joint_model_group_map_.end()) {
            all_subgroups_added = false;
            break;
          }
        }
        if(all_subgroups_added) {
          //only get one chance to do it right
          if(addModelGroup(group_configs[i])) {
            processed[i] = true;
            added = true;
          } else {
            ROS_WARN_STREAM("Failed to add group " << group_configs[i].name_);
          }
        }
      }
    }
    if(!added) {
      for(unsigned int i = 0; i < processed.size(); i++) {
        if(!processed[i]) {
          ROS_WARN_STREAM("Could not process group " << group_configs[i].name_ << " due to unmet subgroup dependencies");
        }
      }
      return;
    }
  }
}
Example #12
void test_expression(const char*                                                    expression_string,
                     const boost::function<adobe::array_t (const adobe::array_t&)>& process_func)
{
    adobe::array_t    expression;
    std::stringstream stream(expression_string);

    adobe::expression_parser(stream, adobe::line_position_t("expression_string")).require_expression(expression);
    
    std::cout << std::endl << "-=- testing expression -=- " << std::endl << std::endl;

    std::cout << "Raw token stream:" << std::endl;
    std::cout << adobe::begin_asl_cel_unsafe << expression << adobe::end_asl_cel_unsafe << std::endl;
    
    std::cout << "Round-trip expression:" << std::endl;
    std::cout << adobe::format_expression(expression) << std::endl;
    
    adobe::array_t processed(process_func(expression));
    
    std::cout << "Raw Processed expression:" << std::endl;
    std::cout << adobe::begin_asl_cel_unsafe << processed << adobe::end_asl_cel_unsafe << std::endl;
    
    std::cout << "Processed expression:" << std::endl;
    std::cout << adobe::format_expression(processed) << std::endl;

    static adobe::virtual_machine_t vm;

    vm.evaluate(expression);

    std::cout << "VM Round-trip expression:" << std::endl;
    std::cout << adobe::begin_asl_cel_unsafe << vm.back().value_m << adobe::end_asl_cel_unsafe << std::endl;

    vm.pop_back();
    vm.evaluate(processed);

    std::cout << "VM processed expression:" << std::endl;
    std::cout << adobe::begin_asl_cel_unsafe << vm.back().value_m << adobe::end_asl_cel_unsafe << std::endl;

    vm.pop_back();
}
Example #13
bool get_status (int hosts_size, std::string& resp)
{
    jos_lib::Status json_val;
    std::map<std::string, int> total_status;
    std::map<std::string, std::map<std::string, int> > status;

    std::string processed ("processed_items");
    std::string synced ("synced_items");
    total_status[processed] = * ( uint64_t * ) total_status_addr;
    total_status[synced]    = * ( uint64_t * ) ( total_status_addr + sizeof (uint64_t ) );

    for ( int i = 0; i < hosts_size; i ++ )
    {
        std::map<std::string, int> single_status;
        single_status[processed]    = * ( uint64_t * ) total_status_addrs[i];
        single_status[synced]       = * ( uint64_t * ) ( total_status_addrs[i] + sizeof (uint64_t ) );
        status[hosts[i]]            = single_status;
    }

    json_val.total_status   = total_status;
    json_val.status         = status;

    return jos_lib::Serialize <jos_lib::Status, true> (json_val, resp);
}
Example #14
void FxChannel::doProcessing()
{
	const fpp_t fpp = Engine::mixer()->framesPerPeriod();
	const bool exporting = Engine::getSong()->isExporting();

	if( m_muted == false )
	{
		for( FxRoute * senderRoute : m_receives )
		{
			FxChannel * sender = senderRoute->sender();
			FloatModel * sendModel = senderRoute->amount();
			if( ! sendModel ) qFatal( "Error: no send model found from %d to %d", senderRoute->senderIndex(), m_channelIndex );

			if( sender->m_hasInput || sender->m_stillRunning )
			{
				// figure out if we're getting sample-exact input
				ValueBuffer * sendBuf = sendModel->valueBuffer();
				ValueBuffer * volBuf = sender->m_volumeModel.valueBuffer();

				// mix its output with this one's output
				sampleFrame * ch_buf = sender->m_buffer;

				// use sample-exact mixing if sample-exact values are available
				if( ! volBuf && ! sendBuf ) // neither volume nor send has sample-exact data...
				{
					const float v = sender->m_volumeModel.value() * sendModel->value();
					if( exporting ) { MixHelpers::addSanitizedMultiplied( m_buffer, ch_buf, v, fpp ); }
					else { MixHelpers::addMultiplied( m_buffer, ch_buf, v, fpp ); }
				}
				else if( volBuf && sendBuf ) // both volume and send have sample-exact data
				{
					if( exporting ) { MixHelpers::addSanitizedMultipliedByBuffers( m_buffer, ch_buf, volBuf, sendBuf, fpp ); }
					else { MixHelpers::addMultipliedByBuffers( m_buffer, ch_buf, volBuf, sendBuf, fpp ); }
				}
				else if( volBuf ) // volume has sample-exact data but send does not
				{
					const float v = sendModel->value();
					if( exporting ) { MixHelpers::addSanitizedMultipliedByBuffer( m_buffer, ch_buf, v, volBuf, fpp ); }
					else { MixHelpers::addMultipliedByBuffer( m_buffer, ch_buf, v, volBuf, fpp ); }
				}
				else // vice versa
				{
					const float v = sender->m_volumeModel.value();
					if( exporting ) { MixHelpers::addSanitizedMultipliedByBuffer( m_buffer, ch_buf, v, sendBuf, fpp ); }
					else { MixHelpers::addMultipliedByBuffer( m_buffer, ch_buf, v, sendBuf, fpp ); }
				}
				m_hasInput = true;
			}
		}


		const float v = m_volumeModel.value();

		if( m_hasInput )
		{
			// only start fxchain when we have input...
			m_fxChain.startRunning();
		}

		m_stillRunning = m_fxChain.processAudioBuffer( m_buffer, fpp, m_hasInput );

		float peakLeft = 0.;
		float peakRight = 0.;
		Engine::mixer()->getPeakValues( m_buffer, fpp, peakLeft, peakRight );
		m_peakLeft = qMax( m_peakLeft, peakLeft * v );
		m_peakRight = qMax( m_peakRight, peakRight * v );
	}
	else
	{
		m_peakLeft = m_peakRight = 0.0f;
	}

	// increment dependency counter of all receivers
	processed();
}
Example #15
void EuclideanClusterExtractorCurvature::segment(std::vector<pcl::PointIndices::Ptr> &segments)
{
  //ptrdiff_t (*p_myrandom)(ptrdiff_t) = myrandom;
  // uncomment srand if you want the indices to be truly random
  if(useSrand_)
  {
    srand(time(NULL));
  }
  if(!treeSet_)
  {
    std::cerr<<"No KdTree was set...creating"<<std::endl;
    createTree();
  }

  // \note If the tree was created over <cloud, indices>, we guarantee a 1-1 mapping between what the tree returns
  //and indices[i]
  if(tree_->getInputCloud()->points.size() != cloud_->size())
  {
    std::cerr<<"[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset ("
            << tree_->getInputCloud()->points.size()
            <<") than the input cloud ("<<cloud_->size()<<")!"<<std::endl;
    return;
  }
  if(!normalsSet_)
  {
    std::cerr<<"No Normals were set...calculating"<<std::endl;
    calculateNormals();
  }

  if(cloud_->size() != normals_->points.size())
  {
    std::cerr<<"[pcl::extractEuclideanClusters] Number of points in the input point cloud ("
            <<cloud_->size()<<") different than normals ("<<normals_->size()<<")!"<<std::endl;
    return;
  }

  // Create a bool vector of processed point indices, and initialize it to false
  std::vector<bool> processed(cloud_->size(), false);

  //create a random copy of indices
  std::vector<int> indices_rnd(cloud_->size());
  for(unsigned int i = 0; i < indices_rnd.size(); ++i)
  {
    indices_rnd[i] = i;
  }
  // uncomment the myrandom part if you want the indices to be truly random
  if(useSrand_)
  {
    std::random_shuffle(indices_rnd.begin(), indices_rnd.end(), myrandom);
    std::cerr<<"\"REAL\" RANDOM"<<std::endl;
  }
  else
  {
    std::random_shuffle(indices_rnd.begin(), indices_rnd.end());
  }
  std::cerr << "Processed size: " << processed.size() << std::endl;
  std::vector<int> index_lookup(indices_rnd.size());
  for(unsigned int i = 0; i < indices_rnd.size(); ++i)
  {
    index_lookup[indices_rnd[i]] = i;
  }

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  // Process all points in the indices vector
  for(size_t i = 0; i < indices_rnd.size(); ++i)
  {

    if(processed[i] || normals_->points[indices_rnd[i]].curvature > maxCurvature_)
    {
      /*if(normals.points[indices_rnd[i]].curvature > max_curvature)
              std::cerr<<"Curvature of point skipped: "<<normals.points[indices_rnd[i]].curvature<<std::endl;*/
      continue;
    }
    pcl::PointIndices::Ptr seed_queue(new pcl::PointIndices());
    int sq_idx = 0;
    seed_queue->indices.push_back(indices_rnd[i]);

    processed[i] = true;

    while(sq_idx < (int)seed_queue->indices.size())
    {
      // Search for sq_idx
      if(!tree_->radiusSearch(seed_queue->indices[sq_idx], distTolerance_, nn_indices, nn_distances))
      {
        sq_idx++;
        continue;
      }

      for(size_t j = 1; j < nn_indices.size(); ++j)               // nn_indices[0] should be sq_idx
      {
        // std::cerr<<nn_indices[j]<<std::endl;
        if(processed[index_lookup[nn_indices[j]]])                              // Has this point been processed before ?
        {
          continue;
        }

        // [-1;1]
        double dot_p =
            normals_->points[indices_rnd[i]].normal[0] * normals_->points[nn_indices[j]].normal[0] +
            normals_->points[indices_rnd[i]].normal[1] * normals_->points[nn_indices[j]].normal[1] +
            normals_->points[indices_rnd[i]].normal[2] * normals_->points[nn_indices[j]].normal[2];
        if(fabs(acos(dot_p)) < eps_angle_)
        {
          processed[index_lookup[nn_indices[j]]] = true;
          seed_queue->indices.push_back(nn_indices[j]);
        }
      }
      sq_idx++;
    }

    // If this queue is satisfactory, add to the clusters
    if(seed_queue->indices.size() >= minPts_ && seed_queue->indices.size() <= maxPts_)
    {
      seed_queue->header = cloud_->header;
      segments.push_back(seed_queue);
    }
  }
  int unprocessed_counter = 0;
  for(unsigned int i = 0; i < processed.size(); ++i)
  {
    if(processed[i] == false)
    {
      //std::cerr<<"Indice not processed at " <<i<<" : "<<indices_rnd[i]<<std::endl;
      unprocessed_counter++;
    }
  }
  std::cerr<<"Number of unprocessed indices: "<<unprocessed_counter<<std::endl;
}
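The useSrand_ branch above shuffles with a user-supplied myrandom (note the commented-out function-pointer line at the top of segment()). A minimal sketch compatible with the three-argument std::random_shuffle overload, which expects a callable returning a value in [0, i):

#include <cstdlib>

// Minimal sketch of a compatible generator; the real myrandom is defined elsewhere.
ptrdiff_t myrandom(ptrdiff_t i)
{
  return std::rand() % i;
}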
Example #16
void
SmoothEuclideanSegmenter<PointT>::segment()
{
    size_t max_pts_per_cluster = std::numeric_limits<int>::max();

    clusters_.clear();
    CHECK (scene_->points.size() == normals_->points.size ());

    if(!octree_ || octree_->getInputCloud() != scene_) {// create an octree for search
        octree_.reset( new pcl::octree::OctreePointCloudSearch<PointT> (param_.octree_resolution_ ) );
        octree_->setInputCloud(scene_);
        octree_->addPointsFromInputCloud();
    }

    // Create a bool vector of processed point indices, and initialize it to false
    std::vector<bool> processed (scene_->points.size (), false);
    std::vector<int> nn_indices;
    std::vector<float> nn_distances;

    float eps_angle_threshold_rad = pcl::deg2rad(param_.eps_angle_threshold_deg_);

    // Process all points in the indices vector
    for (size_t i = 0; i < scene_->points.size (); ++i)
    {
        if (processed[i] || !pcl::isFinite(scene_->points[i]))
            continue;

        std::vector<size_t> seed_queue;
        size_t sq_idx = 0;
        seed_queue.push_back (i);

        processed[i] = true;

        // used only when planar-patch-only extraction (compute_planar_patches_only_) is enabled
        Eigen::Vector3f avg_normal = normals_->points[i].getNormalVector3fMap();
        Eigen::Vector3f avg_plane_pt = scene_->points[i].getVector3fMap();

        while (sq_idx < seed_queue.size ())
        {
            size_t sidx = seed_queue[sq_idx];
            const PointT &query_pt = scene_->points[ sidx ];
            const pcl::Normal &query_n = normals_->points[ sidx ];

            if (normals_->points[ sidx ].curvature > param_.curvature_threshold_)
            {
                sq_idx++;
                continue;
            }

            // Search for sq_idx - scale radius with distance of point (due to noise)
            float radius = param_.cluster_tolerance_;
            float curvature_threshold = param_.curvature_threshold_;
            float eps_angle_threshold = eps_angle_threshold_rad;

            if ( param_.z_adaptive_ )
            {
                radius = param_.cluster_tolerance_ * ( 1 + (std::max(query_pt.z, 1.f) - 1.f));
                curvature_threshold = param_.curvature_threshold_ * ( 1 + (std::max(query_pt.z, 1.f) - 1.f));
                eps_angle_threshold = eps_angle_threshold_rad * ( 1 + (std::max(query_pt.z, 1.f) - 1.f));
            }

            if (!scene_->isOrganized() && !param_.force_unorganized_)
            {
                if(!octree_->radiusSearch (query_pt, radius, nn_indices, nn_distances))
                {
                    sq_idx++;
                    continue;
                }
            }
            else    // check pixel neighbors
            {
                int width = scene_->width;
                int height = scene_->height;
                int u = sidx%width;
                int v = sidx/width;

                nn_indices.resize(8);
                nn_distances.resize(8);
                size_t kept=0;
                for(int shift_u=-1; shift_u<=1; shift_u++)
                {
                    int uu = u + shift_u;
                    if ( uu < 0 || uu>=width)
                        continue;

                    for(int shift_v=-1; shift_v<=1; shift_v++)
                    {
                        int vv = v + shift_v;
                        if ( vv < 0 || vv>=height)
                            continue;

                        int nn_idx = vv*width + uu;
                        float dist = ( scene_->points[sidx].getVector3fMap() - scene_->points[nn_idx].getVector3fMap() ).norm();
                        if (dist < radius)
                        {
                            nn_indices[kept] = nn_idx;
                            nn_distances[kept] = dist;
                            kept++;
                        }
                    }
                }
                nn_indices.resize(kept);
                nn_distances.resize(kept);
            }

            for (size_t j = 0; j < nn_indices.size (); j++)
            {
                if ( processed[nn_indices[j]] ) // Has this point been processed before ?
                    continue;

                if (normals_->points[nn_indices[j]].curvature > curvature_threshold)
                    continue;

                Eigen::Vector3f n1;
                if(param_.compute_planar_patches_only_)
                    n1 = avg_normal;
                else
                    n1 = query_n.getNormalVector3fMap();

                pcl::Normal nn = normals_->points[ nn_indices[j] ];
                const Eigen::Vector3f &n2 = nn.getNormalVector3fMap();

                double dot_p = n1.dot(n2);

                if (fabs (dot_p) > cos(eps_angle_threshold))
                {
                    if(param_.compute_planar_patches_only_)
                    {
                        const Eigen::Vector3f &nn_pt = scene_->points[ nn_indices[j] ].getVector3fMap();
                        float dist = fabs(avg_normal.dot(nn_pt - avg_plane_pt));

                        if(dist > param_.planar_inlier_dist_)
                            continue;

                        runningAverage( avg_normal, seed_queue.size(), n2 );
                        avg_normal.normalize();
                        runningAverage( avg_plane_pt, seed_queue.size(), nn_pt );
                    }

                    processed[nn_indices[j]] = true;
                    seed_queue.push_back (nn_indices[j]);
                }
            }

            sq_idx++;
        }

        // If this queue is satisfactory, add to the clusters
        if (seed_queue.size () >= param_.min_points_ && seed_queue.size () <= max_pts_per_cluster)
        {
            std::vector<int> r;
            r.resize ( seed_queue.size () );
            for (size_t j = 0; j < seed_queue.size (); ++j)
                r[j] = seed_queue[j];

            std::sort (r.begin (), r.end ());
            r.erase (std::unique (r.begin (), r.end ()), r.end ());
            clusters_.push_back ( r ); // We could avoid a copy by working directly in the vector
        }
    }
}
Example #17
template<typename PointInT, typename PointNT, typename PointOutT> void
pcl::CVFHEstimation<PointInT, PointNT, PointOutT>::extractEuclideanClustersSmooth (
    const pcl::PointCloud<pcl::PointNormal> &cloud,
    const pcl::PointCloud<pcl::PointNormal> &normals,
    float tolerance,
    const pcl::search::Search<pcl::PointNormal>::Ptr &tree,
    std::vector<pcl::PointIndices> &clusters,
    double eps_angle,
    unsigned int min_pts_per_cluster,
    unsigned int max_pts_per_cluster)
{
  if (tree->getInputCloud ()->points.size () != cloud.points.size ())
  {
    PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
    return;
  }
  if (cloud.points.size () != normals.points.size ())
  {
    PCL_ERROR ("[pcl::extractEuclideanClusters] Number of points in the input point cloud (%zu) different than normals (%zu)!\n", cloud.points.size (), normals.points.size ());
    return;
  }

  // Create a bool vector of processed point indices, and initialize it to false
  std::vector<bool> processed (cloud.points.size (), false);

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;
  // Process all points in the indices vector
  for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
  {
    if (processed[i])
      continue;

    std::vector<unsigned int> seed_queue;
    int sq_idx = 0;
    seed_queue.push_back (i);

    processed[i] = true;

    while (sq_idx < static_cast<int> (seed_queue.size ()))
    {
      // Search for sq_idx
      if (!tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances))
      {
        sq_idx++;
        continue;
      }

      for (size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
      {
        if (processed[nn_indices[j]]) // Has this point been processed before ?
          continue;

        //processed[nn_indices[j]] = true;
        // [-1;1]

        double dot_p = normals.points[seed_queue[sq_idx]].normal[0] * normals.points[nn_indices[j]].normal[0]
                     + normals.points[seed_queue[sq_idx]].normal[1] * normals.points[nn_indices[j]].normal[1]
                     + normals.points[seed_queue[sq_idx]].normal[2] * normals.points[nn_indices[j]].normal[2];

        if (fabs (acos (dot_p)) < eps_angle)
        {
          processed[nn_indices[j]] = true;
          seed_queue.push_back (nn_indices[j]);
        }
      }

      sq_idx++;
    }

    // If this queue is satisfactory, add to the clusters
    if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
    {
      pcl::PointIndices r;
      r.indices.resize (seed_queue.size ());
      for (size_t j = 0; j < seed_queue.size (); ++j)
        r.indices[j] = seed_queue[j];

      std::sort (r.indices.begin (), r.indices.end ());
      r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());

      r.header = cloud.header;
      clusters.push_back (r); // We could avoid a copy by working directly in the vector
    }
  }
}
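Since std::acos() maps into [0, π], the fabs() around it in the test above is redundant, and for unit-length normals the whole test can avoid the transcendental call: fabs(acos(dot_p)) < eps_angle is equivalent to dot_p > cos(eps_angle) for eps_angle in [0, π]. The cos form also sidesteps NaNs when rounding pushes dot_p slightly above 1. A sketch of the inner test rewritten this way:

// Equivalent, cheaper angle test (unit-length normals assumed).
// Example #16 instead tests fabs(dot_p) > cos(...), which additionally accepts anti-parallel normals.
if (dot_p > std::cos (eps_angle))
{
  processed[nn_indices[j]] = true;
  seed_queue.push_back (nn_indices[j]);
}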
Example #18
void
pcl::seededHueSegmentation (const PointCloud<PointXYZRGB>                         &cloud, 
                            const boost::shared_ptr<search::Search<PointXYZRGB> > &tree,
                            float                                                 tolerance, 
                            PointIndices                                          &indices_in,
                            PointIndices                                          &indices_out,
                            float                                                 delta_hue)
{
  if (tree->getInputCloud ()->points.size () != cloud.points.size ())
  {
    PCL_ERROR ("[pcl::seededHueSegmentation] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
    return;
  }
  // Create a bool vector of processed point indices, and initialize it to false
  std::vector<bool> processed (cloud.points.size (), false);

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  // Process all points in the indices vector
  for (size_t k = 0; k < indices_in.indices.size (); ++k)
  {
    int i = indices_in.indices[k];
    if (processed[i])
      continue;

    processed[i] = true;

    std::vector<int> seed_queue;
    int sq_idx = 0;
    seed_queue.push_back (i);

    PointXYZRGB  p;
    p = cloud.points[i];
    PointXYZHSV h;
    PointXYZRGBtoXYZHSV(p, h);

    while (sq_idx < static_cast<int> (seed_queue.size ()))
    {
      int ret = tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances, std::numeric_limits<int>::max());
      if(ret == -1)
        PCL_ERROR("[pcl::seededHueSegmentation] radiusSearch returned error code -1\n");
      // Search for sq_idx
      if (!ret)
      {
        sq_idx++;
        continue;
      }

      for (size_t j = 1; j < nn_indices.size (); ++j)             // nn_indices[0] should be sq_idx
      {
        if (processed[nn_indices[j]])                             // Has this point been processed before ?
          continue;

        PointXYZRGB  p_l;
        p_l = cloud.points[nn_indices[j]];
        PointXYZHSV h_l;
        PointXYZRGBtoXYZHSV(p_l, h_l);

        if (fabs(h_l.h - h.h) < delta_hue)
        {
          seed_queue.push_back (nn_indices[j]);
          processed[nn_indices[j]] = true;
        }
      }

      sq_idx++;
    }
    // Copy the seed queue into the output indices
    for (size_t l = 0; l < seed_queue.size (); ++l)
      indices_out.indices.push_back(seed_queue[l]);
  }
  // This is purely esthetical, can be removed for speed purposes
  std::sort (indices_out.indices.begin (), indices_out.indices.end ());
}
Example #19
template <typename PointT> void
pcl::extractLabeledEuclideanClusters (const PointCloud<PointT> &cloud, 
                                      const boost::shared_ptr<search::Search<PointT> > &tree,
                                      float tolerance, 
                                      std::vector<std::vector<PointIndices> > &labeled_clusters,
                                      unsigned int min_pts_per_cluster, 
                                      unsigned int max_pts_per_cluster,
                                      unsigned int max_label)
{
  if (tree->getInputCloud ()->points.size () != cloud.points.size ())
  {
    PCL_ERROR ("[pcl::extractLabeledEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", (unsigned long)tree->getInputCloud ()->points.size (), (unsigned long)cloud.points.size ());
    return;
  }
  // Create a bool vector of processed point indices, and initialize it to false
  std::vector<bool> processed (cloud.points.size (), false);

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  // Process all points in the indices vector
  for (size_t i = 0; i < cloud.points.size (); ++i)
  {
    if (processed[i])
      continue;

    std::vector<int> seed_queue;
    int sq_idx = 0;
    seed_queue.push_back (i);

    processed[i] = true;

    while (sq_idx < (int)seed_queue.size ())
    {
      // Search for sq_idx
      if (!tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances))
      {
        sq_idx++;
        continue;
      }

      for (size_t j = 1; j < nn_indices.size (); ++j)             // nn_indices[0] should be sq_idx
      {
        if (processed[nn_indices[j]])                             // Has this point been processed before ?
          continue;
        if (cloud.points[i].label == cloud.points[nn_indices[j]].label)
        {
          // Perform a simple Euclidean clustering
          seed_queue.push_back (nn_indices[j]);
          processed[nn_indices[j]] = true;
        }
      }

      sq_idx++;
    }

    // If this queue is satisfactory, add to the clusters
    if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
    {
      pcl::PointIndices r;
      r.indices.resize (seed_queue.size ());
      for (size_t j = 0; j < seed_queue.size (); ++j)
        r.indices[j] = seed_queue[j];

      std::sort (r.indices.begin (), r.indices.end ());
      r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());

      r.header = cloud.header;
      labeled_clusters[cloud.points[i].label].push_back (r);   // We could avoid a copy by working directly in the vector
    }
  }
}
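Note that max_label is never used in the body above: the function indexes labeled_clusters directly by point label, so the caller must size the output beforehand. A caller-side sketch (parameter values are placeholders):

// Make room for every possible label before calling (sketch).
std::vector<std::vector<pcl::PointIndices> > labeled_clusters (max_label + 1);
pcl::extractLabeledEuclideanClusters (cloud, tree, tolerance, labeled_clusters,
                                      min_pts_per_cluster, max_pts_per_cluster, max_label);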
Example #20
void DenseReconstruction::activeSegmentation (const pcl::PointCloud<pcl::PointXYZLRegionF> &cloud_input,
    float search_radius,
    double eps_angle,
    int fp_index,
    pcl::PointIndices &indices_out)
{
  pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
  pcl::PointCloud<pcl::Boundary> boundary;

  normalsEstimation(cloud_input.makeShared(), normals);
  boundaryEstimation(cloud_input.makeShared(), normals, boundary);

  std::cerr << "Index of seed point is: " << fp_index << std::endl;
  std::cerr << "Curvature of seed point: " << normals->points[fp_index].curvature << std::endl;

  pcl::PointCloud<pcl::PointXYZRGBA> cloud_in;
  pcl::copyPointCloud(cloud_input, cloud_in);

  pcl::search::KdTree<pcl::PointXYZRGBA>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGBA> ());
  tree->setInputCloud(cloud_in.makeShared());

  if (fp_index < 0 || fp_index >= static_cast<int> (cloud_in.points.size ()))
  {
    PCL_ERROR ("[pcl::activeSegmentation] Fixation point given is invalid\n");
    return;
  }
  if (boundary.points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Boundary map given was built for a different dataset (%zu) than the input cloud (%zu)!\n",
        boundary.points.size () , cloud_in.points.size ());
    return;
  }
  if (tree->getInputCloud ()->points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n",
        tree->getInputCloud ()->points.size (), cloud_in.points.size ());
    return;
  }
  if (normals->points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Input Normals are for a different point cloud dataset (%zu) than the input cloud (%zu)!\n",
        normals->points.size (), cloud_in.points.size ());
    return;
  }
  std::vector<int> seed_queue;
  std::vector<bool> processed (cloud_in.size(), false);
  seed_queue.push_back (fp_index);
  indices_out.indices.push_back (fp_index);
  processed[fp_index] = true;
  std::vector<int> nn_indices;
  std::vector<float> nn_distances;
  //process while there are valid seed points

  for(size_t seed_idx = 0; seed_idx < seed_queue.size();++seed_idx)
  {

    int curr_seed;
    curr_seed = seed_queue[seed_idx];

    // Search for seeds
    if (!tree->radiusSearch (curr_seed, search_radius, nn_indices, nn_distances))
      continue;
    //process all points found in the neighborhood
    bool stop_growing = false;
    size_t indices_old_size = indices_out.indices.size();
    for (size_t i=1; i < nn_indices.size (); ++i)
    {
      if (processed[nn_indices.at (i)])
        continue;
      if (boundary.points[nn_indices.at (i)].boundary_point != 0)
      {
        stop_growing=true;
        indices_out.indices.push_back (nn_indices.at (i));
        processed[nn_indices.at (i)] = true;
        break;
      }

      bool is_convex = false;
      pcl::PointXYZRGBA temp;
      temp.x = cloud_in.points[fp_index].x - cloud_in.points[nn_indices.at (i)].x;
      temp.y = cloud_in.points[fp_index].y - cloud_in.points[nn_indices.at (i)].y;
      temp.z = cloud_in.points[fp_index].z - cloud_in.points[nn_indices.at (i)].z;

      double dot_p = normals->points[nn_indices.at (i)].normal[0] * temp.x
          + normals->points[nn_indices.at (i)].normal[1] * temp.y
          + normals->points[nn_indices.at (i)].normal[2] * temp.z;

      dot_p = dot_p>1? 1:dot_p;
      dot_p = dot_p<-1 ? -1:dot_p;

      if ((acos (dot_p) > eps_angle*M_PI / 180))
        is_convex = true;


      if (is_convex)
      {
        indices_out.indices.push_back (nn_indices.at (i));
        processed[nn_indices.at (i)] = true;
      }
      else
        break;
    }//new neighbor

    if(!stop_growing && (indices_old_size != indices_out.indices.size()))
    {
      for (size_t j = indices_old_size-1; j < indices_out.indices.size(); ++j)
        seed_queue.push_back(indices_out.indices.at (j));
    }
  }//new seed point

}
Example #21
/** @todo: fix the return value, make sure the exit is not needed anymore*/
template <typename PointT> void
pcl16::extractEuclideanClusters (const PointCloud<PointT> &cloud, 
                               const std::vector<int> &indices,
                               const boost::shared_ptr<search::Search<PointT> > &tree,
                               float tolerance, std::vector<PointIndices> &clusters,
                               unsigned int min_pts_per_cluster, 
                               unsigned int max_pts_per_cluster)
{
  // \note If the tree was created over <cloud, indices>, we guarantee a 1-1 mapping between what the tree returns
  //and indices[i]
  if (tree->getInputCloud ()->points.size () != cloud.points.size ())
  {
    PCL16_ERROR ("[pcl16::extractEuclideanClusters] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
    return;
  }
  if (tree->getIndices ()->size () != indices.size ())
  {
    PCL16_ERROR ("[pcl16::extractEuclideanClusters] Tree built for a different set of indices (%zu) than the input set (%zu)!\n", tree->getIndices ()->size (), indices.size ());
    return;
  }

  // Create a bool vector of processed point indices, and initialize it to false
  std::vector<bool> processed (cloud.points.size (), false);

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;
  // Process all points in the indices vector
  for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
  {
    if (processed[indices[i]])
      continue;

    std::vector<int> seed_queue;
    int sq_idx = 0;
    seed_queue.push_back (indices[i]);

    processed[indices[i]] = true;

    while (sq_idx < static_cast<int> (seed_queue.size ()))
    {
      // Search for sq_idx
      int ret = tree->radiusSearch (cloud.points[seed_queue[sq_idx]], tolerance, nn_indices, nn_distances);
      if( ret == -1)
      {
        PCL16_ERROR("[pcl16::extractEuclideanClusters] Received error code -1 from radiusSearch\n");
        exit(0);
      }
      if (!ret)
      {
        sq_idx++;
        continue;
      }

      for (size_t j = 1; j < nn_indices.size (); ++j)             // nn_indices[0] should be sq_idx
      {
        if (nn_indices[j] == -1 || processed[nn_indices[j]])        // Has this point been processed before ?
          continue;

        // Perform a simple Euclidean clustering
        seed_queue.push_back (nn_indices[j]);
        processed[nn_indices[j]] = true;
      }

      sq_idx++;
    }

    // If this queue is satisfactory, add to the clusters
    if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
    {
      pcl16::PointIndices r;
      r.indices.resize (seed_queue.size ());
      for (size_t j = 0; j < seed_queue.size (); ++j)
        // This is the only place where indices come into play
        r.indices[j] = seed_queue[j];

      // These two lines should not be needed: (can anyone confirm?) -FF
      //r.indices.assign(seed_queue.begin(), seed_queue.end());
      std::sort (r.indices.begin (), r.indices.end ());
      r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());

      r.header = cloud.header;
      clusters.push_back (r);   // We could avoid a copy by working directly in the vector
    }
  }
}
Example #22
void
seededHueSegmentation (const boost::shared_ptr<pcl::PointCloud<pcl::PointXYZRGB> >  &host_cloud_,
                       const pcl::gpu::Octree::Ptr                                  &tree,
                       float                                                        tolerance,
                       PointIndices                                                 &indices_in,
                       PointIndices                                                 &indices_out,
                       float                                                        delta_hue)
{

  // Create a bool vector of processed point indices, and initialize it to false
  // cloud is a DeviceArray<PointType>
  std::vector<bool> processed (host_cloud_->points.size (), false);

  int max_answers = host_cloud_->points.size();

  // Process all points in the indices vector
  for (size_t k = 0; k < indices_in.indices.size (); ++k)
  {
    int i = indices_in.indices[k];
    // if we already processed this point continue with the next one
    if (processed[i])
      continue;
    // now we will process this point
    processed[i] = true;

    PointXYZRGB  p;
    p = host_cloud_->points[i];
    PointXYZHSV h;
    PointXYZRGBtoXYZHSV(p, h);

    // Create the query queue on the device, point based not indices
    pcl::gpu::Octree::Queries queries_device;
    // Create the query queue on the host
    pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
    // Push the starting point in the vector
    queries_host.push_back (host_cloud_->points[i]);

    unsigned int found_points = queries_host.size ();
    unsigned int previous_found_points = 0;

    pcl::gpu::NeighborIndices result_device;

    // Host buffer for results
    std::vector<int> sizes, data;

    // once the area stop growing, stop also iterating.
    while (previous_found_points < found_points)
    {
      // Move queries to GPU
      queries_device.upload(queries_host);
      // Execute search
      tree->radiusSearch(queries_device, tolerance, max_answers, result_device);

      // Store the previously found number of points
      previous_found_points = found_points;

      // Clear the Host vectors
      sizes.clear (); data.clear ();

      // Copy results from GPU to Host
      result_device.sizes.download (sizes);
      result_device.data.download (data);

      for(size_t qp = 0; qp < sizes.size (); qp++)
      {
        for(int qp_r = 0; qp_r < sizes[qp]; qp_r++)
        {
          if(processed[data[qp_r + qp * max_answers]])
            continue;

          PointXYZRGB  p_l;
          p_l = host_cloud_->points[data[qp_r + qp * max_answers]];
          PointXYZHSV h_l;
          PointXYZRGBtoXYZHSV(p_l, h_l);

          if (fabs(h_l.h - h.h) < delta_hue)
          {
            processed[data[qp_r + qp * max_answers]] = true;
            queries_host.push_back (host_cloud_->points[data[qp_r + qp * max_answers]]);
            found_points++;
          }
        }
      }
    }
    for(size_t qp = 0; qp < sizes.size (); qp++)
    {
      for(int qp_r = 0; qp_r < sizes[qp]; qp_r++)
      {
        indices_out.indices.push_back(data[qp_r + qp * max_answers]);
      }
    }
  }
  // @todo: do we need to sort here and remove double points?
}
Example #23
void extractEuclideanClusters (
      PointCloud<PointXYZRGB >::Ptr cloud, pcl::PointCloud<pcl::Normal >::Ptr normals,
      pcl::search::KdTree<PointXYZRGB >::Ptr tree, 
      float tolerance, std::vector<pcl::PointIndices > &clusters, double eps_angle,
      unsigned int min_pts_per_cluster = 1,
      unsigned int max_pts_per_cluster = (std::numeric_limits<int >::max) ())
  {
    // \note If the tree was created over <cloud, indices>, we guarantee a 1-1 mapping between what the tree returns
    //and indices[i]
    float adjTolerance = 0;

    // Create a bool vector of processed point indices, and initialize it to false
    std::vector<bool > processed(cloud->points.size(), false);

	std::vector<int> nn_indices;
	std::vector<float> nn_distances;
	// Process all points in the indices vector
	std::cout << "Point size is " << cloud->points.size () << std::endl;
	for (size_t i = 0; i < cloud->points.size (); ++i)
    {
		if(processed[i])
			continue;

		std::vector<int > seed_queue;
		int sq_idx = 0;
		seed_queue.push_back(i);
		processed[i] = true;

		int cnt = 0;

		while (sq_idx < (int)seed_queue.size())
		{ 
			cnt++;

			// Search for sq_idx
//			 adjTolerance = cloud->points[seed_queue[sq_idx]].distance * tolerance;
			adjTolerance = tolerance;

			if (!tree->radiusSearch(seed_queue[sq_idx], adjTolerance, nn_indices, nn_distances))
	        {
				sq_idx++;
				continue;
			}

			for(size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
			{
				if (processed[nn_indices[j]]) // Has this point been processed before ?
					continue;

				processed[nn_indices[j]] = true; // note: marked before the angle test, so rejected points are never revisited
				// [-1;1]
				double dot_p =
				normals->points[i].normal[0] * normals->points[nn_indices[j]].normal[0] +
				normals->points[i].normal[1] * normals->points[nn_indices[j]].normal[1] +
				normals->points[i].normal[2] * normals->points[nn_indices[j]].normal[2];
				if ( fabs (acos (dot_p)) < eps_angle )
				{
					processed[nn_indices[j]] = true;
					seed_queue.push_back (nn_indices[j]);
				}
	        }

			sq_idx++;
		}

		// If this queue is satisfactory, add to the clusters
		if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
		{
			pcl::PointIndices r;
			r.indices.resize (seed_queue.size ());
			for (size_t j = 0; j < seed_queue.size (); ++j)
				r.indices[j] = seed_queue[j];

			sort (r.indices.begin (), r.indices.end ());
			r.indices.erase (unique (r.indices.begin (), r.indices.end ()), r.indices.end ());

			r.header = cloud->header;
			//ROS_INFO ("cluster of size %d data point\n ",r.indices.size());
			clusters.push_back(r);
		}
	}
}
Example #24
template<typename PointT> void
pcl::ConditionalEuclideanClustering<PointT>::segment (pcl::IndicesClusters &clusters)
{
  // Prepare output (going to use push_back)
  clusters.clear ();
  if (extract_removed_clusters_)
  {
    small_clusters_->clear ();
    large_clusters_->clear ();
  }

  // Validity checks
  if (!initCompute () || input_->points.empty () || indices_->empty () || !condition_function_)
    return;

  // Initialize the search class
  if (!searcher_)
  {
    if (input_->isOrganized ())
      searcher_.reset (new pcl::search::OrganizedNeighbor<PointT> ());
    else
      searcher_.reset (new pcl::search::KdTree<PointT> ());
  }
  searcher_->setInputCloud (input_, indices_);

  // Temp variables used by search class
  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  // Create a bool vector of processed point indices, and initialize it to false
  // Need to have it contain all possible points because radius search can not return indices into indices
  std::vector<bool> processed (input_->points.size (), false);

  // Process all points indexed by indices_
  for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii)  // iii = input indices iterator
  {
    // Has this point been processed before?
    if ((*indices_)[iii] == -1 || processed[(*indices_)[iii]])
      continue;

    // Set up a new growing cluster
    std::vector<int> current_cluster;
    int cii = 0;  // cii = cluster indices iterator

    // Add the point to the cluster
    current_cluster.push_back ((*indices_)[iii]);
    processed[(*indices_)[iii]] = true;

    // Process the current cluster (it can be growing in size as it is being processed)
    while (cii < static_cast<int> (current_cluster.size ()))
    {
      // Search for neighbors around the current seed point of the current cluster
      if (searcher_->radiusSearch (input_->points[current_cluster[cii]], cluster_tolerance_, nn_indices, nn_distances) < 1)
      {
        cii++;
        continue;
      }

      // Process the neighbors
      for (int nii = 1; nii < static_cast<int> (nn_indices.size ()); ++nii)  // nii = neighbor indices iterator
      {
        // Has this point been processed before?
        if (nn_indices[nii] == -1 || processed[nn_indices[nii]])
          continue;

        // Validate if condition holds
        if (condition_function_ (input_->points[current_cluster[cii]], input_->points[nn_indices[nii]], nn_distances[nii]))
        {
          // Add the point to the cluster
          current_cluster.push_back (nn_indices[nii]);
          processed[nn_indices[nii]] = true;
        }
      }
      cii++;
    }

    // If extracting removed clusters, all clusters need to be saved, otherwise only the ones within the given cluster size range
    if (extract_removed_clusters_ || (current_cluster.size () >= min_cluster_size_ && current_cluster.size () <= max_cluster_size_))
    {
      pcl::PointIndices pi;
      pi.header = input_->header;
      pi.indices.resize (current_cluster.size ());
      for (int ii = 0; ii < static_cast<int> (current_cluster.size ()); ++ii)  // ii = indices iterator
        pi.indices[ii] = current_cluster[ii];

      if (extract_removed_clusters_ && current_cluster.size () < min_cluster_size_)
        small_clusters_->push_back (pi);
      else if (extract_removed_clusters_ && current_cluster.size () > max_cluster_size_)
        large_clusters_->push_back (pi);
      else
        clusters.push_back (pi);
    }
  }

  deinitCompute ();
}
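The clustering above is driven entirely by condition_function_, which receives the two candidate points and their squared distance. A minimal sketch of such a condition (pcl::PointXYZI chosen for illustration; the threshold is arbitrary):

#include <cmath>

// Grow a cluster only across points of similar intensity (sketch).
bool enforceIntensitySimilarity (const pcl::PointXYZI& a, const pcl::PointXYZI& b, float /*squared_distance*/)
{
  return std::fabs (a.intensity - b.intensity) < 5.0f;
}

// Hypothetical wiring:
// pcl::ConditionalEuclideanClustering<pcl::PointXYZI> cec;
// cec.setConditionFunction (&enforceIntensitySimilarity);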
Example #25
std::vector<std::vector<PointId>> extractClusters(PointView& view,
                                                  uint64_t min_points,
                                                  uint64_t max_points,
                                                  double tolerance)
{
    // Index the incoming PointView for subsequent radius searches.
    KD3Index kdi(view);
    kdi.build();

    // Create variables to track PointIds that have already been added to
    // clusters and to build the list of cluster indices.
    std::vector<PointId> processed(view.size(), 0);
    std::vector<std::vector<PointId>> clusters;

    for (PointId i = 0; i < view.size(); ++i)
    {
        // Points can only belong to a single cluster.
        if (processed[i])
            continue;

        // Initialize list of indices belonging to current cluster, marking the
        // seed point as processed.
        std::vector<PointId> seed_queue;
        size_t sq_idx = 0;
        seed_queue.push_back(i);
        processed[i] = 1;

        // Check each point in the cluster for additional neighbors within the
        // given tolerance, remembering that the list can grow if we add points
        // to the cluster.
        while (sq_idx < seed_queue.size())
        {
            // Find neighbors of the next cluster point.
            PointId j = seed_queue[sq_idx];
            std::vector<PointId> ids = kdi.radius(j, tolerance);

            // The case where the only neighbor is the query point.
            if (ids.size() == 1)
            {
                sq_idx++;
                continue;
            }

            // Skip neighbors that already belong to a cluster and add the rest
            // to this cluster.
            for (auto const& k : ids)
            {
                if (processed[k])
                    continue;
                seed_queue.push_back(k);
                processed[k] = 1;
            }

            sq_idx++;
        }

        // Keep clusters that are within the min/max number of points.
        if (seed_queue.size() >= min_points && seed_queue.size() <= max_points)
            clusters.push_back(seed_queue);
    }

    return clusters;
}
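A hedged driver sketch for extractClusters() above (the reader plumbing follows the usual PDAL pattern; the filename and cluster parameters are placeholders):

#include <pdal/Options.hpp>
#include <pdal/PointTable.hpp>
#include <pdal/PointView.hpp>
#include <pdal/io/LasReader.hpp>

int main()
{
    pdal::Options opts;
    opts.add("filename", "input.las");  // placeholder path

    pdal::LasReader reader;
    reader.setOptions(opts);

    pdal::PointTable table;
    reader.prepare(table);
    pdal::PointViewSet viewSet = reader.execute(table);
    pdal::PointViewPtr view = *viewSet.begin();

    // >= 10 points, <= 100000 points, 1.0 unit tolerance (placeholders).
    auto clusters = extractClusters(*view, 10, 100000, 1.0);
    return 0;
}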
Example #26
void DenseReconstruction::extractEuclideanClustersCurvature(std::vector<pcl::PointIndices::Ptr> &clusters){


	float tolerance = 0.01; // radius (in meters) of the KdTree radius search used in region growing
	double eps_angle = 35 * M_PI / 180; // region-growing angle threshold (35 degrees; 10 was also tried)
	double max_curvature = 0.1; // maximum curvature of a point from which region growing may start
	unsigned int min_pts_per_cluster = 1;
	unsigned int max_pts_per_cluster = (std::numeric_limits<int>::max) ();
    // Create a bool vector of processed point indices, and initialize it to false
    std::vector<bool> processed (region_grow_point_cloud_->size (), false);

    //create a random copy of indices
    std::vector<int> indices_rnd(region_grow_point_cloud_->size ());
    for(unsigned int i=0;i<indices_rnd.size();++i)
    {
      indices_rnd[i]=i;
    }
    // uncomment the myrandom part if you want the indices to be truly random
//    if(use_srand == 1)
//    {
//      std::random_shuffle(indices_rnd.begin(), indices_rnd.end(), myrandom);
//      ROS_ERROR("REAL RANDOM");
//    }
//    else
      std::random_shuffle(indices_rnd.begin(), indices_rnd.end());
    std::cerr<<"Processed size: "<<processed.size()<<std::endl;
    std::vector<int> index_lookup(indices_rnd.size());
    for(unsigned int i= 0; i<indices_rnd.size();++i)
      index_lookup[indices_rnd[i]] = i;

    std::vector<int> nn_indices;
    std::vector<float> nn_distances;
    // Process all points in the indices vector
    for (size_t i = 0; i < indices_rnd.size (); ++i)
    {

      if (processed[i] || cloud_normals_->points[indices_rnd[i]].curvature > max_curvature)
      {
        /*if(normals.points[indices_rnd[i]].curvature > max_curvature)
          std::cerr<<"Curvature of point skipped: "<<normals.points[indices_rnd[i]].curvature<<std::endl;*/
        continue;
      }
      pcl::PointIndices::Ptr seed_queue(new pcl::PointIndices());
      int sq_idx = 0;
      seed_queue->indices.push_back (indices_rnd[i]);

      processed[i] = true;

      while (sq_idx < (int)seed_queue->indices.size ())
      {
        // Search for sq_idx
        if (!tree_->radiusSearch (seed_queue->indices[sq_idx], tolerance, nn_indices, nn_distances))
        {
          sq_idx++;
          continue;
        }

        for (size_t j = 1; j < nn_indices.size (); ++j)             // nn_indices[0] should be sq_idx
        {
          // std::cerr<<nn_indices[j]<<std::endl;
          if (processed[index_lookup[nn_indices[j]]])                             // Has this point been processed before ?
            continue;

          // The dot product of two unit normals lies in [-1, 1]. Note that the
          // comparison is made against the normal of the region's seed point,
          // not the current queue point.
          double dot_p =
              cloud_normals_->points[indices_rnd[i]].normal[0] * cloud_normals_->points[nn_indices[j]].normal[0] +
              cloud_normals_->points[indices_rnd[i]].normal[1] * cloud_normals_->points[nn_indices[j]].normal[1] +
              cloud_normals_->points[indices_rnd[i]].normal[2] * cloud_normals_->points[nn_indices[j]].normal[2];
          if ( fabs (acos (dot_p)) < eps_angle )
          {
            processed[index_lookup[nn_indices[j]]] = true;
            seed_queue->indices.push_back (nn_indices[j]);
          }
        }
        sq_idx++;
      }

      // If this queue is satisfactory, add to the clusters
      if (seed_queue->indices.size () >= min_pts_per_cluster && seed_queue->indices.size () <= max_pts_per_cluster)
      {
        seed_queue->header = region_grow_point_cloud_->header;
        clusters.push_back (seed_queue);
      }
    }
    // For debugging: count how many points were never assigned to any cluster.
    int unprocessed_counter = 0;
    for (unsigned int i = 0; i < processed.size(); ++i)
    {
      if (!processed[i])
      {
        //std::cerr<<"Indice not processed at " <<i<<" : "<<indices_rnd[i]<<std::endl;
        unprocessed_counter++;
      }
    }
    //std::cerr<<"Number of unprocessed indices: "<<unprocessed_counter<<std::endl;
}
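
The growth criterion in the example above is the angle between surface normals: a neighbor joins the region only if acos of the normals' dot product stays below eps_angle. Here is a hedged sketch of just that predicate, assuming unit-length normals as PCL's normal estimation produces; the Normal struct and the function name are illustrative stand-ins, not PCL types.

#include <algorithm>
#include <cmath>

// Illustrative stand-in for pcl::Normal.
struct Normal { double nx, ny, nz, curvature; };

// Returns true if 'candidate' may join the region grown from 'seed'.
// Assumes both normals are unit length.
bool sameSmoothRegion(const Normal& seed, const Normal& candidate,
                      double eps_angle /* radians */)
{
    double dot_p = seed.nx * candidate.nx
                 + seed.ny * candidate.ny
                 + seed.nz * candidate.nz;
    // Clamp before acos: rounding can push the dot product slightly outside
    // [-1, 1], and acos would then return NaN (silently rejecting the point).
    dot_p = std::max(-1.0, std::min(1.0, dot_p));
    return std::fabs(std::acos(dot_p)) < eps_angle;
}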
Example #27
0
void QmlScanner::processingFinished(const QImage& image) {
    emit processed(image);

    m_busy = false;
    busyChanged();
}
Example #28
0
// For each cluster we check whether we can identify an independent
// bag, which might be useful for clustered planarity testing.
// Computes the independent bag affiliation for all vertices,
// stores the result in m_indyBagNumber, and sets m_numIndyBags accordingly.
// We could interweave this with computeBags to be more efficient, but
// that would make the code more complex, and in case we don't need
// the IndyBags we would do additional work in vain.
void ClusterAnalysis::computeIndyBags() {
	m_numIndyBags = 0; // used both to count the bags and to store the current
					   // indyBag index number for vertex assignment (i.e., it starts with 1)
	const Graph &G = m_C->constGraph();

	// Store the root cluster of each indyBag
	delete[] m_indyBagRoots; // allocated with new[] below, so array delete is required
	// Intermediate storage during computation, maximum of #vertices possible
#ifdef OGDF_DEBUG
	Array<cluster> bagRoots(0,G.numberOfNodes(),nullptr);
#else
	Array<cluster> bagRoots(G.numberOfNodes());
#endif

	// Store indyBag affiliation. Every vertex will get a number != -1 (DefaultIndex),
	// as in the worst case the whole graph is an indyBag (in the root cluster).
	// Once assigned, the number won't change during the processing.
	m_indyBagNumber.init(G, DefaultIndex);

	// We run bottom up over all clusters (to find the minimum inclusion).
	// For each vertex, we use the outer activity and bag index information.
	// In case we find a bag without outeractive vertices it is an IndyBag.
	// Already processed vertices are simply marked by an indyBag index entry different from -1.
#if 0
	NodeArray<bool> processed(G, false); //mark vertices when IndyBag detected
#endif

	// We do not have the sets of vertices for all bags, as only the bag index
	// has been stored for a cluster.
	// Detect the current leaf clusters for bottom up traversal.
	List<cluster> ccleafs;
	ClusterArray<int> unprocessedChildren(*m_C); //processing below: compute bags
	for(cluster c : m_C->clusters)
	{
		if (c->cCount() == 0) ccleafs.pushBack(c);
		unprocessedChildren[c] = c->cCount();
	}
	OGDF_ASSERT(!ccleafs.empty());

	// Run through all clusters, leaves first.
	while (!ccleafs.empty()){
		// We cannot store the following information over the whole
		// graph even though the bag index (which is one out of the
		// set of vertex set ids in union find) would allow this.
		// The bag index is defined per cluster.
		// However, when moving up in the cluster tree indy information
		// is not monotone, we therefore would need to update it accordingly.
		// (Bag which is not outeractive will not become outeractive, but
		// may get a part of an outeractive bag with the same id, and an
		// outeractive bag might become enclosed).
		HashArray<int, bool> indyBag(true); // true if bag with index i does not have outeractive vertices
		// We want to store all vertices for each index that may be a
		// potential indyBag index. We could add these in the entry stored
		// in our index Skiplist, but then we need a comparison of the
		// respecting class objects, slowing down the processing.
		HashArray<int, List<node> > bagNodes; //holds vertices for index i

		// We need a data structure that holds all indexes used, allows
		// to search for them and to iterate through them. Could use
		// insertion sorted dynamically allocated array here, but as
		// SkipLists are implemented in OGDF, which provide comparable
		// efficiency, we use them.
		Skiplist<int*> indexNumbers; //holds all index numbers in c

		cluster c = ccleafs.popFrontRet();

		List<node> nodes;
		ListConstIterator<node> it;
		// Process leaves separately. As long as we don't do something
		// special for non-leaves, we could just use getClusterNodes instead.
		if (c->cCount() == 0) {
			it = c->nBegin();
		}
		else {
			// At this point all child clusters of c have been processed.
			// This is somewhat slow, as we repeatedly collect the vertices
			// Todo: Instead, we could clone the clusternodes code
			// here and process them as we see them.

			c->getClusterNodes(nodes);

#if 0
			std::cout <<"Processing cluster with "<<nodes.size()<<"\n";
#endif

			it = nodes.begin();
		}
		// Run through all vertices in c
		partitionCluster(it, c, bagNodes, indyBag, indexNumbers, bagRoots);
		// Now we update the status of the parent cluster and,
		// in case all its children are processed, add it to
		// the process queue.
		if (c != m_C->rootCluster())
		{
			OGDF_ASSERT(unprocessedChildren[c->parent()] > 0);
			unprocessedChildren[c->parent()]--;
			if (unprocessedChildren[c->parent()] == 0) ccleafs.pushBack(c->parent());
		}
	}
	// Copying into a smaller array is a bit slower than just reserving an array
	// of size #vertices, but we do it anyway.
	m_indyBagRoots = new cluster[m_numIndyBags];
	for (int k = 0; k < m_numIndyBags; k++)
	{
		OGDF_ASSERT(bagRoots[k] != nullptr);
		m_indyBagRoots[k] = bagRoots[k];
	}
#ifdef OGDF_DEBUG
#if 0
	List<node> rnodes;
	m_C->rootCluster()->getClusterNodes(rnodes);

	for(node v : rnodes) {
		std::cout << "Root bag index: "<<bagIndex(v, m_C->rootCluster())<<"\n";
		std::cout << "Indy bag index: "<<m_indyBagNumber[v]<<"\n";
	}
#endif

	Skiplist<int*> ibind;
	for(node v : G.nodes)
	{
		int i = m_indyBagNumber[v];
		if (!ibind.isElement(&i))
		{
			ibind.add(new int(i));
		}
		std::cout << "numIndyBags: "<<m_numIndyBags<<" i: "<<i<<"\n";
		OGDF_ASSERT(i != DefaultIndex);
		OGDF_ASSERT(i >= 0);
		OGDF_ASSERT(i < m_numIndyBags);

	}
	int storedBags = 0;
	SkiplistIterator<int*> its = ibind.begin();
	while (its.valid())
	{
		storedBags++;
		delete *its;
		++its;
	}
	Logger::slout() << m_numIndyBags<< " independent bags detected, "<<storedBags<<" stored\n";
	OGDF_ASSERT(m_numIndyBags==storedBags);
#endif
}
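
The traversal order in computeIndyBags is worth isolating: clusters are processed leaves-first, and a parent enters the queue only once its unprocessedChildren counter reaches zero, i.e., a reverse topological order over the cluster tree. Below is a minimal sketch of that pattern under the assumption that all nodes live in one vector; Node is an illustrative stand-in for OGDF's cluster type, not part of the library.

#include <cassert>
#include <cstddef>
#include <list>
#include <vector>

// Illustrative stand-in for OGDF's cluster; all nodes live in one vector.
struct Node {
    Node* parent = nullptr;
    std::vector<Node*> children;
};

// Visits every node after all of its children have been visited.
template <typename Visit>
void bottomUp(std::vector<Node>& tree, Visit visit)
{
    std::list<Node*> queue;
    std::vector<int> pending(tree.size());
    for (std::size_t i = 0; i < tree.size(); ++i)
    {
        pending[i] = static_cast<int>(tree[i].children.size());
        if (pending[i] == 0)
            queue.push_back(&tree[i]); // leaves are ready immediately
    }
    while (!queue.empty())
    {
        Node* c = queue.front();
        queue.pop_front();
        visit(c);
        if (c->parent != nullptr)
        {
            std::size_t p = static_cast<std::size_t>(c->parent - &tree[0]);
            assert(pending[p] > 0);
            if (--pending[p] == 0) // last child finished: parent is ready
                queue.push_back(c->parent);
        }
    }
}

In computeIndyBags the visit step corresponds to partitionCluster plus the bag bookkeeping around it.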
Example #29
0
void TLSConnectionPrivate::OnRead(uv_stream_t *stream, ssize_t nread, uv_buf_t buf) {
	TLSConnectionPrivate *cp = static_cast<TLSConnectionPrivate *>(stream->data);
	assert(cp != nullptr);

	// Only allow OnRead calls to have an effect on the TLSConnection in
	// the STARVED_SSL_CONNECT and ESTABLISHED states.
	bool bad = cp->state_ != TLS_CONNECTION_STATE_STARVED_SSL_CONNECT &&
	           cp->state_ != TLS_CONNECTION_STATE_ESTABLISHED;
	if (bad) {
		return;
	}

	if (nread == -1) {
		uv_err_t last = uv_last_error(cp->loop_);
		if (last.code == UV_EOF) {
			cp->ShutdownRemote();
		} else {
			cp->ShutdownError(UVUtils::ErrorFromUVError(last));
		}
		return;
	} else if (nread == 0) {
		cp->ShutdownRemote();
		return;
	}

	ByteArray b(buf.base, nread, buf.len);
	cp->biostate_->PutNewBuffer(b);

	if (cp->state_ == TLS_CONNECTION_STATE_STARVED_SSL_CONNECT) {
		if (!cp->HandleStarvedConnectState()) {
			return;
		}
	}

	if (cp->state_ == TLS_CONNECTION_STATE_ESTABLISHED) {
		ByteArray processed(static_cast<int>(nread));

		bool backoff = false;
		while (!backoff) {
			// Number of plaintext bytes produced by this SSL_read() call;
			// a distinct name avoids shadowing the 'nread' parameter.
			int nplain = SSL_read(cp->ssl_, reinterpret_cast<void *>(processed.Data()), processed.Capacity());
			if (nplain == -1) {
				int err = SSL_get_error(cp->ssl_, nplain);
				if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
					// OpenSSL needs to be fed more data. Back off.
					backoff = true;
					break;
				}
				cp->ShutdownError(OpenSSLUtils::ErrorFromOpenSSLErrorCode(err));
				return;
			} else if (nplain == 0) {
				cp->ShutdownRemote();
				return;
			}

			processed.Truncate(nplain);
			if (cp->read_handler_) {
				cp->read_handler_(processed);
			}
		}
	}
}
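
The loop above is the standard OpenSSL drain pattern for non-blocking transports: after the raw bytes are handed to the BIO, SSL_read is called repeatedly until it reports SSL_ERROR_WANT_READ (or WANT_WRITE), at which point the connection backs off until more ciphertext arrives. The sketch below condenses that pattern, assuming the ciphertext has already been fed into the SSL object's read BIO; the onPlaintext callback and the 4 KiB buffer size are illustrative choices, not part of the original code.

#include <openssl/ssl.h>

#include <functional>
#include <vector>

// Drains all plaintext OpenSSL can currently produce. Returns true if the
// caller should back off and wait for more ciphertext, false if the
// connection should be shut down (clean close or fatal error).
bool drainPlaintext(SSL* ssl,
                    const std::function<void(const char*, int)>& onPlaintext)
{
    std::vector<char> buf(4096);
    for (;;)
    {
        int n = SSL_read(ssl, buf.data(), static_cast<int>(buf.size()));
        if (n > 0)
        {
            onPlaintext(buf.data(), n); // hand decrypted bytes to the application
            continue;
        }
        int err = SSL_get_error(ssl, n);
        if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE)
            return true;  // OpenSSL needs more data; back off
        return false;     // SSL_ERROR_ZERO_RETURN (clean close) or a real error
    }
}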
Example #30
0
void EuclideanClustering::extractClusters(brics_3d::PointCloud3D *inCloud){

	int k = inCloud->getSize(); // upper bound on the number of neighbors per query
	brics_3d::NearestNeighborANN nearestneighborSearch;
	brics_3d::Point3D queryPoint3D;
	vector<int> neighborIndices;

	nearestneighborSearch.setData(inCloud);
	nearestneighborSearch.setMaxDistance(this->clusterTolerance);
	//	nearestneighborSearch.setMaxDistance(1000);
	// Create a bool vector of processed point indices, and initialize it to false
	std::vector<bool> processed(inCloud->getSize(), false);


	// Process all points in the indices vector
	for (size_t i = 0; i < inCloud->getSize(); ++i)
	{
		if (processed[i])
			continue;

		std::vector<int> seed_queue;
		int sq_idx = 0;
		seed_queue.push_back (i);
		//std::cout << "[CHEAT][EuclideanClustering3D] "<< i << " : " << seed_queue.size () << std::endl;
		processed[i] = true;

		while (sq_idx < (int)seed_queue.size ())
		{
			// Search for the neighbors of the current queue point; note that
			// the query point is seed_queue[sq_idx], not sq_idx itself.
			neighborIndices.clear();
			queryPoint3D = (*inCloud->getPointCloud())[seed_queue[sq_idx]];
			nearestneighborSearch.findNearestNeighbors(&queryPoint3D, &neighborIndices, k);

			//if (!tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances))
			if(neighborIndices.size()==0)
			{
				sq_idx++;
				continue;
			}

			for (size_t j = 0; j < neighborIndices.size(); ++j)  // neighborIndices[0] is the query point; the processed[] check skips it
			{
				if (processed[neighborIndices[j]])                             // Has this point been processed before ?
					continue;

				// Perform a simple Euclidean clustering
				seed_queue.push_back (neighborIndices[j]);
				processed[neighborIndices[j]] = true;
			}

			sq_idx++;
		}
//		std::cout << "[CHEAT][EuclideanClustering3D] {sq_id : seed_q.size}"<< sq_idx << " : "<< seed_queue.size() << std::endl;
		// If this queue is satisfactory, add to the clusters
		if (seed_queue.size () >= minClusterSize && seed_queue.size () <= maxClusterSize)
		{
//			std::cout << "[CHEAT][EuclideanClustering3D] found one cluster, size="<< seed_queue.size() << std::endl;

			// std::unique only removes adjacent duplicates; since processed[]
			// already prevents duplicates here, this is merely a safeguard.
			seed_queue.erase(std::unique(seed_queue.begin(), seed_queue.end()), seed_queue.end());
			brics_3d::PointCloud3D *tempPointCloud = new brics_3d::PointCloud3D();

			for (size_t j = 0; j < seed_queue.size(); ++j) {
				tempPointCloud->addPointPtr((*inCloud->getPointCloud())[seed_queue[j]].clone());
			}
			// extractedClusters takes ownership of tempPointCloud; it must not
			// be deleted here.
			extractedClusters.push_back(tempPointCloud);
		}
//		std::cout << "[CHEAT][EuclideanClustering3D] "<< i << " : " << seed_queue.size () << std::endl;
	}
}
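
One subtlety in the last example is ownership: extractedClusters stores raw PointCloud3D pointers, which is exactly why the commented-out delete must stay commented. A small sketch of how the same handoff could be made explicit with std::unique_ptr; Cloud and Clusterer here are illustrative stand-ins, not BRICS_3D types.

#include <memory>
#include <vector>

struct Cloud { /* stand-in for brics_3d::PointCloud3D */ };

class Clusterer {
public:
    void addCluster() {
        auto cluster = std::make_unique<Cloud>();
        // ... fill the cluster with copies of the member points ...
        // The vector takes ownership; no manual delete is needed anywhere.
        extractedClusters.push_back(std::move(cluster));
    }
private:
    std::vector<std::unique_ptr<Cloud>> extractedClusters;
};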