static void update_distance(const GpsSnapshot *gpsSnapshot)
{
    const GeoPoint prev = gpsSnapshot->previousPoint;
    const GeoPoint curr = gpsSnapshot->sample.point;

    if (!isValidPoint(&prev) || !isValidPoint(&curr))
        return;

    g_distance += distPythag(&prev, &curr) / 1000;
}
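The GeoPoint-based snippets (update_distance above, isStartPointValid and getSectorGeoPointAtIndex below) call isValidPoint() without ever showing it. A rough sketch only, assuming a GeoPoint holds latitude/longitude and that an unset point is zero-initialised (both the layout and the sentinel are guesses, not taken from the source):

/* Sketch only -- the real GeoPoint layout and sentinel are not shown in these examples. */
typedef struct {
    double latitude;   /* degrees, assumed */
    double longitude;  /* degrees, assumed */
} GeoPoint;

static int isValidPoint(const GeoPoint *p)
{
    /* Treat a NULL pointer or a zero-initialised (0, 0) coordinate as "no valid fix". */
    return p != NULL && !(p->latitude == 0.0 && p->longitude == 0.0);
}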
Example #2
int doSearch(Point start){
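    // Grid BFS from `start`: SPOTA cells join the search at distance 0, empty cells get the
    // current distance + 1, already-numbered cells are relaxed when a shorter path is found,
    // and minDist records the smallest distance from which a SPOTB cell is reachable.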
    int minDist = INT_MAX;
    pointInMap(start) = 0;
    push(start);
    while (queueIsNotEmpty) {
        Point curr = pop();
        int i;
        for (i = 0; i < MOVCOUNT; i++) {
            Point next = nextPoint(curr, i);
            if (isValidPoint(next)) {
                if (pointInMap(next) == SPOTB && minDist > pointInMap(curr)) {
                    minDist = pointInMap(curr);
                } else if (pointInMap(next) == SPOTA) {
                    pointInMap(next) = 0;
                    push(next);
                } else if (pointInMap(next) == EMPTY) {
                    pointInMap(next) = pointInMap(curr) + 1;
                    push(next);
                } else if (pointInMap(next) > pointInMap(curr) + 1) {
                    pointInMap(next) = pointInMap(curr) + 1;
                    push(next);
                }
            }
        }
    }
    return minDist;
}
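Examples #2 and #3 above (and #12 and #13 below) share the same grid helpers but never show them. A plausible sketch, assuming an 8-connected grid and the Point/mapSize/MOVCOUNT names used in the snippets (all inferred, not quoted from the original project):

/* Assumed helpers for the grid-search examples: an 8-neighbour move table plus a bounds check. */
enum { MOVCOUNT = 8 };

static const int DX[MOVCOUNT] = { -1, -1, -1,  0,  0,  1,  1,  1 };
static const int DY[MOVCOUNT] = { -1,  0,  1, -1,  1, -1,  0,  1 };

static int isValidPoint(Point p)
{
    return p.x >= 0 && p.x < mapSize.width &&
           p.y >= 0 && p.y < mapSize.height;
}

static Point nextPoint(Point p, int i)
{
    Point next = { p.x + DX[i], p.y + DY[i] };
    return next;
}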
Example #3
void search(Step start){
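    // Heap-based search over the map (assuming popHeap yields the lowest-cost Step): expand the
    // 8 neighbours with accumulated cost step.distance + map[][] until finished(step) holds,
    // then store that cost in `min`.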
    int x, y;
    for (x = 0; x < mapSize.width; x++){
        for (y = 0; y < mapSize.height; y++){
            visited[x][y] = 0;
        }
    }
    pushHeap(start);
    while(heapIndex >= 0){
        Step step = popHeap();
        if(finished(step)){
            //printf("FOUND: %d, %d.\tValue: %d\tIndex: %d\n", step.point.x, step.point.y, step.distance, heapIndex);
            min = step.distance;
            break;
        }
        if(visited[step.point.x][step.point.y]){
            continue;
        }
        visited[step.point.x][step.point.y] = 1;
        int i;
        for(i = 0; i < 8; i++){
            Point next = nextPoint(step.point, i);
            if(isValidPoint(next)){
                pushHeap(makeStep(next, step.distance + map[next.x][next.y]));
            }
        }
    }
}
Example #4
void HeightmapTimeAccumulation::accumulate(
   const sensor_msgs::Image::ConstPtr& msg)
 {
   boost::mutex::scoped_lock lock(mutex_);
   if (!config_) {
     JSK_NODELET_ERROR("no ~input/config is yet available");
     return;
   }
   tf::StampedTransform tf_transform;
   tf_->lookupTransform(fixed_frame_id_, center_frame_id_,
                        msg->header.stamp,
                        tf_transform);
   Eigen::Affine3f from_center_to_fixed;
   tf::transformTFToEigen(tf_transform, from_center_to_fixed);
   cv::Mat new_heightmap = cv_bridge::toCvShare(
     msg, sensor_msgs::image_encodings::TYPE_32FC1)->image;
   // Transform prev_cloud_ to current frame
   Eigen::Affine3f from_prev_to_current
     = prev_from_center_to_fixed_.inverse() * from_center_to_fixed;
   pcl::PointCloud<pcl::PointXYZ> transformed_pointcloud;
   pcl::transformPointCloud(prev_cloud_, transformed_pointcloud, from_prev_to_current.inverse());
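   // Copy heights from the re-projected previous cloud into any cells of the new heightmap
   // that do not yet hold a valid measurement.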
   for (size_t i = 0; i < transformed_pointcloud.points.size(); i++) {
     pcl::PointXYZ p = transformed_pointcloud.points[i];
     if (isValidPoint(p)) {
       cv::Point index = toIndex(p, new_heightmap);
       if (isValidIndex(index, new_heightmap)) {
         if (!isValidCell(index, new_heightmap)) {
           new_heightmap.at<float>(index.y, index.x) = p.z;
         }
       }
     }
   }
   publishHeightmap(new_heightmap, msg->header);
   prev_from_center_to_fixed_ = from_center_to_fixed;
 }
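The HeightmapTimeAccumulation example above iterates over a pcl::PointCloud<pcl::PointXYZ>, so the natural validity test is a NaN check on the coordinates. A minimal sketch of such a predicate (the actual helper in the project may differ):

#include <cmath>
#include <pcl/point_types.h>

// Sketch: a point re-projected from the previous heightmap is unusable when any coordinate is NaN.
inline bool isValidPoint(const pcl::PointXYZ& p)
{
  return !std::isnan(p.x) && !std::isnan(p.y) && !std::isnan(p.z);
}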
Example #5
void FboMaskShape::replacePoint(FboMaskSelection& selection){
	if(selection.valid && !selection.midPoint){
		int max = points.size();
		if(isValidPoint(selection)){
			points[selection.pointID] = selection.point;
		}
	}
}
Example #6
void FboMaskShape::deletePoint(FboMaskSelection& selection){
	if(selection.valid && !selection.midPoint){
		int max = points.size();
		if(isValidPoint(selection)){
			points.erase(points.begin()+selection.pointID);
		}
	}
}
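Examples #5 and #6 above (and #10 below) index points by selection.pointID, so the guard presumably reduces to a range check. A hedged sketch, assuming isValidPoint is a member of FboMaskShape and that pointID is the only field that matters (the member layout of FboMaskSelection is a guess):

// Sketch only: a selection is usable when its point index lies inside this shape's point list.
bool FboMaskShape::isValidPoint(const FboMaskSelection& selection){
	return selection.pointID >= 0 && selection.pointID < (int)points.size();
}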
Example #7
int isStartPointValid(const Track *t)
{
    if (NULL == t)
        return 0;

    const GeoPoint p = getStartPoint(t);
    return isValidPoint(&p);
}
Example #8
double[][] costMap(double energyMap[][],int len1,int len2){
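  // Dynamic programming: the first row is just the energy values; every other cell's cost is its
  // energy plus the cheapest of the (up to three) valid cells in the row above it.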
  double[][] costMap = new double[len1][len2];
  //Compute the costs of the top (i.e. the values)
  for(int j=0;j<len2;j++){
    costMap[0][j]=energyMap[0][j];
  }
  //Compute the rest of the minimum costs
  for(int i=1;i<len1;i++){
    for(int j=0;j<len2;j++){
      double min=Double.MAX_VALUE;
      if(isValidPoint(costMap,i-1,j-1,len1,len2) && costMap[i-1][j-1]<min) min=costMap[i-1][j-1];
      if(isValidPoint(costMap,i-1,j,len1,len2) && costMap[i-1][j]<min) min=costMap[i-1][j];
      if(isValidPoint(costMap,i-1,j+1,len1,len2) && costMap[i-1][j+1]<min) min=costMap[i-1][j+1];
      costMap[i][j]=min+energyMap[i][j];
    }
  }
  return costMap;
}
Example #9
BYTE Utils::PointToPos(CPoint Point)
{
	if (isValidPoint(Point))
	{
		return 16 * (BYTE(Point.y) - 1) + BYTE(Point.x);
	}
	else
	{
		return 0;
	}
}
Example #10
void FboMaskShape::insertPoint(FboMaskSelection& selection){
	if(selection.valid && selection.midPoint){
		int max = points.size();
		if(max > 1 && isValidPoint(selection)){
			
			points.insert(points.begin()+selection.pointID+1, selection.point);
			//ofxVec2f& pointA = points[i];
			//ofxVec2f& pointB = points[(i+1)%max];
		}
	}
}
Example #11
GeoPoint getSectorGeoPointAtIndex(const Track *t, int index)
{
    if (index < 0) index = 0;
    const int max = isStage(t) ? STAGE_SECTOR_COUNT : CIRCUIT_SECTOR_COUNT;
    const GeoPoint *sectors = isStage(t) ? t->stage.sectors : t->circuit.sectors;

    if (index < max && isValidPoint(sectors + index))
        return sectors[index];

    // If here, return the finish since that is logically the next sector point.
    return getFinishPoint(t);
}
Example #12
void color(Point start, int col){
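    // Flood fill: starting at `start`, recolour with `col` every reachable cell marked SPOT.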
    push(start);
    while (queueIsNotEmpty) {
        Point curr = pop();
        pointInMap(curr) = col;
        int i;
        for (i = 0; i < MOVCOUNT; i++) {
            Point next = nextPoint(curr, i);
            if (isValidPoint(next) && pointInMap(next) == SPOT) {
                push(next);
            }
        }
    }
    resetQueue();
}
Example #13
void fillAreaWithNumber(Point point, int number){
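    // Stack-based flood fill (8-connected): clear each reachable negative map cell and label it
    // with `number` in sea[][].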
    stackIndex = -1;
    pushStack(point);
    while(stackIndex >= 0){
        point = popStack();
        map[point.x][point.y] = 0;
        sea[point.x][point.y] = number;
        if(visited[point.x][point.y]) continue;
        visited[point.x][point.y] = 1;
        int i;
        for(i = 0; i < 8; i++){
            Point next = nextPoint(point, i);
            if(isValidPoint(next) && map[next.x][next.y] < 0){
                pushStack(next);
            }
        }
    }
}
Example #14
void Stereoproc::imageCb(
        const sensor_msgs::ImageConstPtr& l_raw_msg,
        const sensor_msgs::CameraInfoConstPtr& l_info_msg,
        const sensor_msgs::ImageConstPtr& r_raw_msg,
        const sensor_msgs::CameraInfoConstPtr& r_info_msg)
{
    boost::lock_guard<boost::recursive_mutex> config_lock(config_mutex_);
    boost::lock_guard<boost::mutex> connect_lock(connect_mutex_);
    int level = connected_.level();
    NODELET_DEBUG("got images, level %d", level);

    // Update the camera model
    model_.fromCameraInfo(l_info_msg, r_info_msg);

    cv::cuda::Stream l_strm, r_strm;
    cv::cuda::GpuMat l_raw, r_raw;
    std::vector<GPUSender::Ptr> senders;

    // Create cv::Mat views onto all buffers
    const cv::Mat l_cpu_raw = cv_bridge::toCvShare(
            l_raw_msg,
            l_raw_msg->encoding)->image;
    cv::cuda::registerPageLocked(const_cast<cv::Mat&>(l_cpu_raw));
    const cv::Mat r_cpu_raw = cv_bridge::toCvShare(
            r_raw_msg,
            r_raw_msg->encoding)->image;
    cv::cuda::registerPageLocked(const_cast<cv::Mat&>(r_cpu_raw));
    l_raw.upload(l_cpu_raw, l_strm);
    r_raw.upload(r_cpu_raw, r_strm);

    cv::cuda::GpuMat l_mono;
    if(connected_.DebayerMonoLeft || connected_.RectifyMonoLeft || connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    {
        cv::cuda::demosaicing(l_raw, l_mono, CV_BayerRG2GRAY, 1, l_strm);
    }
    if(connected_.DebayerMonoLeft)
    {
       GPUSender::Ptr t(new GPUSender(l_raw_msg, sensor_msgs::image_encodings::MONO8, &pub_mono_left_));
       senders.push_back(t);
       t->enqueueSend(l_mono, l_strm);
    }

    cv::cuda::GpuMat r_mono;
    if(connected_.DebayerMonoRight || connected_.RectifyMonoRight || connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    {
        cv::cuda::demosaicing(r_raw, r_mono, CV_BayerRG2GRAY, 1, r_strm);
    }
    if(connected_.DebayerMonoRight)
    {
       GPUSender::Ptr t(new GPUSender(r_raw_msg, sensor_msgs::image_encodings::MONO8, &pub_mono_right_));
       senders.push_back(t);
       t->enqueueSend(r_mono, r_strm);
    }

    cv::cuda::GpuMat l_color;
    if(connected_.DebayerColorLeft || connected_.RectifyColorLeft || connected_.Pointcloud)
    {
        cv::cuda::demosaicing(l_raw, l_color, CV_BayerRG2BGR, 3, l_strm);
    }
    if(connected_.DebayerColorLeft)
    {
       GPUSender::Ptr t(new GPUSender(l_raw_msg, sensor_msgs::image_encodings::BGR8, &pub_color_left_));
       senders.push_back(t);
       t->enqueueSend(l_color, l_strm);
    }

    cv::cuda::GpuMat r_color;
    if(connected_.DebayerColorRight || connected_.RectifyColorRight)
    {
        cv::cuda::demosaicing(r_raw, r_color, CV_BayerRG2BGR, 3, r_strm);
    }
    if(connected_.DebayerColorRight)
    {
       GPUSender::Ptr t(new GPUSender(r_raw_msg, sensor_msgs::image_encodings::BGR8, &pub_color_right_));
       senders.push_back(t);
       t->enqueueSend(r_color, r_strm);
    }

    cv::cuda::GpuMat l_rect_mono, r_rect_mono;
    if(connected_.RectifyMonoLeft || connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    {
        model_.left().rectifyImageGPU(l_mono, l_rect_mono, cv::INTER_LINEAR, l_strm);
    }
    if(connected_.RectifyMonoRight || connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    {
        model_.right().rectifyImageGPU(r_mono, r_rect_mono, cv::INTER_LINEAR, r_strm);
    }

    if(connected_.RectifyMonoLeft)
    {
       GPUSender::Ptr t(new GPUSender(l_raw_msg, sensor_msgs::image_encodings::MONO8, &pub_mono_rect_left_));
       senders.push_back(t);
       t->enqueueSend(l_rect_mono, l_strm);
    }

    if(connected_.RectifyMonoRight)
    {
       GPUSender::Ptr t(new GPUSender(r_raw_msg, sensor_msgs::image_encodings::MONO8, &pub_mono_rect_right_));
       senders.push_back(t);
       t->enqueueSend(r_rect_mono, r_strm);
    }

    cv::cuda::GpuMat l_rect_color, r_rect_color;
    if(connected_.RectifyColorLeft || connected_.Pointcloud)
    {
        model_.left().rectifyImageGPU(l_color, l_rect_color, cv::INTER_LINEAR, l_strm);
    }
    if(connected_.RectifyColorRight)
    {
        model_.right().rectifyImageGPU(r_color, r_rect_color, cv::INTER_LINEAR, r_strm);
    }

    if(connected_.RectifyColorLeft)
    {
       GPUSender::Ptr t(new GPUSender(l_raw_msg, sensor_msgs::image_encodings::BGR8, &pub_color_rect_left_));
       senders.push_back(t);
       t->enqueueSend(l_rect_color, l_strm);
    }

    if(connected_.RectifyColorRight)
    {
       GPUSender::Ptr t(new GPUSender(r_raw_msg, sensor_msgs::image_encodings::BGR8, &pub_color_rect_right_));
       senders.push_back(t);
       t->enqueueSend(r_rect_color, r_strm);
    }

    cv::cuda::GpuMat disparity;
    cv::cuda::GpuMat disparity_16s;
    cv::cuda::GpuMat disparity_filtered;
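    // Disparity pipeline: GPU block matching, a CPU round trip through filter_buf_ for speckle
    // filtering, then an optional CUDA disparity bilateral filter before converting back to
    // floating-point disparities.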
    if(connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    {
        r_strm.waitForCompletion();
        block_matcher_->compute(l_rect_mono, r_rect_mono, disparity, l_strm);
        //allocate cpu-side resource
        filter_buf_.create(l_rect_mono.size(), CV_16SC1);
        //enqueueDownload
        disparity.convertTo(disparity_16s, CV_16SC1, 16, l_strm);
        disparity_16s.download(filter_buf_, l_strm);
        l_strm.waitForCompletion();
        filterSpeckles();
        //enqueueUpload
        disparity_16s.upload(filter_buf_, l_strm);
        if(bilateral_filter_enabled_)
        {
            bilateral_filter_->apply(disparity_16s, l_rect_mono, disparity_filtered);
            disparity_filtered.convertTo(disparity, CV_32FC1, 1/16.);
        } else {
            disparity_16s.convertTo(disparity, CV_32FC1, 1/16.);
        }
    }

    if(connected_.Disparity)
    {
        cv::cuda::GpuMat disparity_float;
        if(disparity.type() != CV_32F)
            disparity.convertTo(disparity_float, CV_32FC1);
        else
            disparity_float = disparity;

        // Adjust for any x-offset between the principal points: d' = d - (cx_l - cx_r)
        double cx_l = model_.left().cx();
        double cx_r = model_.right().cx();
        if (cx_l != cx_r)
        {
            cv::cuda::subtract(disparity_float, cv::Scalar(cx_l - cx_r),
                    disparity_float, cv::cuda::GpuMat(), -1, l_strm);
        }

        // Allocate new disparity image message
        disp_msg_.reset(new stereo_msgs::DisparityImage());
        disp_msg_->header         = l_info_msg->header;
        disp_msg_->image.header   = l_info_msg->header;

        // Compute window of (potentially) valid disparities
        int border   = block_matcher_->getBlockSize() / 2;
        int left   = block_matcher_->getNumDisparities() + block_matcher_min_disparity_ + border - 1;
        int right_border = (block_matcher_min_disparity_ >= 0) ? border + block_matcher_min_disparity_ : std::max(border, -block_matcher_min_disparity_);
        int right  = disp_msg_->image.width - 1 - right_border;
        int top    = border;
        int bottom = disp_msg_->image.height - 1 - border;
        disp_msg_->valid_window.x_offset = left;
        disp_msg_->valid_window.y_offset = top;
        disp_msg_->valid_window.width    = right - left;
        disp_msg_->valid_window.height   = bottom - top;
        disp_msg_->min_disparity = block_matcher_min_disparity_ + 1;
        disp_msg_->max_disparity = block_matcher_min_disparity_ + block_matcher_->getNumDisparities() - 1;

        disp_msg_->image.height = l_rect_mono.rows;
        disp_msg_->image.width = l_rect_mono.cols;
        disp_msg_->image.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
        disp_msg_->image.step = l_rect_mono.cols * sizeof(float);
        disp_msg_->image.data.resize(disp_msg_->image.step*disp_msg_->image.height);
        disp_msg_data_ = cv::Mat_<float> (disp_msg_->image.height,
                             disp_msg_->image.width,
                             (float *)&disp_msg_->image.data[0],
                             disp_msg_->image.step);
        cv::cuda::registerPageLocked(disp_msg_data_);

        disparity_float.download(disp_msg_data_, l_strm);

        l_strm.enqueueHostCallback(
                [](int status, void *userData)
                {
                    (void)status;
                    static_cast<Stereoproc*>(userData)->sendDisparity();
                },
                (void*)this);
    }

    if(connected_.DisparityVis)
    {
        GPUSender::Ptr t(new GPUSender(l_raw_msg,
                    sensor_msgs::image_encodings::BGRA8, &pub_disparity_vis_));
        senders.push_back(t);
        cv::cuda::GpuMat disparity_int(l_cpu_raw.size(), CV_16SC1);
        cv::cuda::GpuMat disparity_image(l_cpu_raw.size(), CV_8UC4);
        int ndisp = block_matcher_->getNumDisparities();
        if(disparity.type() == CV_32F)
        {
            disparity.convertTo(disparity_int, CV_16SC1, 16, 0, l_strm);
            ndisp *= 16;
        }
        else
            disparity_int = disparity;
        try
        {
            cv::cuda::drawColorDisp(disparity_int, disparity_image, ndisp, l_strm);
            t->enqueueSend(disparity_image, l_strm);
        }
        catch(const cv::Exception& e)
        {
            NODELET_ERROR_STREAM("Unable to draw color disparity: " << e.err <<
                             "in " << e.file << ":" << e.func <<
                             " line " << e.line);
        }
    }

    cv::cuda::GpuMat xyz;
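    // Point cloud: reproject the disparity image to 3-D on the GPU, then pack x/y/z and a packed
    // RGB value (from the left colour image) into a PointCloud2 on the CPU, writing NaNs where
    // no valid point was reconstructed.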
    if(connected_.Pointcloud)
    {
        model_.projectDisparityImageTo3dGPU(disparity, xyz, true, l_strm);
        cv::cuda::HostMem cuda_xyz(l_cpu_raw.size(), CV_32FC3);
        xyz.download(cuda_xyz, l_strm);
        cv::Mat l_cpu_color(l_cpu_raw.size(), CV_8UC3);
        l_color.download(l_cpu_color, l_strm);
        sensor_msgs::PointCloud2Ptr points_msg = boost::make_shared<sensor_msgs::PointCloud2>();
        points_msg->header = l_raw_msg->header;
        points_msg->height = l_cpu_raw.rows;
        points_msg->width  = l_cpu_raw.cols;
        points_msg->fields.resize (4);
        points_msg->fields[0].name = "x";
        points_msg->fields[0].offset = 0;
        points_msg->fields[0].count = 1;
        points_msg->fields[0].datatype = sensor_msgs::PointField::FLOAT32;
        points_msg->fields[1].name = "y";
        points_msg->fields[1].offset = 4;
        points_msg->fields[1].count = 1;
        points_msg->fields[1].datatype = sensor_msgs::PointField::FLOAT32;
        points_msg->fields[2].name = "z";
        points_msg->fields[2].offset = 8;
        points_msg->fields[2].count = 1;
        points_msg->fields[2].datatype = sensor_msgs::PointField::FLOAT32;
        points_msg->fields[3].name = "rgb";
        points_msg->fields[3].offset = 12;
        points_msg->fields[3].count = 1;
        points_msg->fields[3].datatype = sensor_msgs::PointField::FLOAT32;
        //points_msg->is_bigendian = false; ???
        static const int STEP = 16;
        points_msg->point_step = STEP;
        points_msg->row_step = points_msg->point_step * points_msg->width;
        points_msg->data.resize (points_msg->row_step * points_msg->height);
        points_msg->is_dense = false; // there may be invalid points

        l_strm.waitForCompletion();
        cv::Mat_<cv::Vec3f> cpu_xyz = cuda_xyz.createMatHeader();
        float bad_point = std::numeric_limits<float>::quiet_NaN ();
        int offset = 0;
        for (int v = 0; v < cpu_xyz.rows; ++v)
        {
            for (int u = 0; u < cpu_xyz.cols; ++u, offset += STEP)
            {
                if (isValidPoint(cpu_xyz(v,u)))
                {
                    // x,y,z,rgba
                    memcpy (&points_msg->data[offset + 0], &cpu_xyz(v,u)[0], sizeof (float));
                    memcpy (&points_msg->data[offset + 4], &cpu_xyz(v,u)[1], sizeof (float));
                    memcpy (&points_msg->data[offset + 8], &cpu_xyz(v,u)[2], sizeof (float));
                }
                else
                {
                    memcpy (&points_msg->data[offset + 0], &bad_point, sizeof (float));
                    memcpy (&points_msg->data[offset + 4], &bad_point, sizeof (float));
                    memcpy (&points_msg->data[offset + 8], &bad_point, sizeof (float));
                }
            }
        }

        // Fill in color
        offset = 0;
        const cv::Mat_<cv::Vec3b> color(l_cpu_color);
        for (int v = 0; v < cpu_xyz.rows; ++v)
        {
            for (int u = 0; u < cpu_xyz.cols; ++u, offset += STEP)
            {
                if (isValidPoint(cpu_xyz(v,u)))
                {
                    const cv::Vec3b& bgr = color(v,u);
                    int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
                    memcpy (&points_msg->data[offset + 12], &rgb_packed, sizeof (int32_t));
                }
                else
                {
                    memcpy (&points_msg->data[offset + 12], &bad_point, sizeof (float));
                }
            }
        }

        pub_pointcloud_.publish(points_msg);

    }

    l_strm.waitForCompletion();
    r_strm.waitForCompletion();
    if(connected_.Disparity)
        cv::cuda::unregisterPageLocked(disp_msg_data_);
    //if(connected_.Disparity || connected_.DisparityVis || connected_.Pointcloud)
    //    cv::cuda::unregisterPageLocked(filter_buf_);
    cv::cuda::unregisterPageLocked( const_cast<cv::Mat&>(l_cpu_raw) );
    cv::cuda::unregisterPageLocked( const_cast<cv::Mat&>(r_cpu_raw) );
}
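Examples #14, #15 and #17 all test re-projected stereo points with isValidPoint(const cv::Vec3f&). In stereo_image_proc-style code this typically means rejecting the MISSING_Z sentinel and infinite depths; a sketch along those lines (an assumption about these codebases, not a quote from them):

#include <cmath>
#include <opencv2/core.hpp>
#include <image_geometry/stereo_camera_model.h>

// Sketch: reprojectImageTo3D marks pixels with invalid disparity using the MISSING_Z sentinel
// and maps zero-disparity pixels to infinity; both cases are treated as invalid points.
inline bool isValidPoint(const cv::Vec3f& pt)
{
  return pt[2] != image_geometry::StereoCameraModel::MISSING_Z && !std::isinf(pt[2]);
}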
Example #15
void StereoProcessor::processPoints2(const stereo_msgs::DisparityImage& disparity,
                                     const cv::Mat& color, const std::string& encoding,
                                     const image_geometry::StereoCameraModel& model,
                                     sensor_msgs::PointCloud2& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in sparse point cloud message
  points.height = dense_points_.rows;
  points.width  = dense_points_.cols;
  points.fields.resize (4);
  points.fields[0].name = "x";
  points.fields[0].offset = 0;
  points.fields[0].count = 1;
  points.fields[0].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[1].name = "y";
  points.fields[1].offset = 4;
  points.fields[1].count = 1;
  points.fields[1].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[2].name = "z";
  points.fields[2].offset = 8;
  points.fields[2].count = 1;
  points.fields[2].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[3].name = "rgb";
  points.fields[3].offset = 12;
  points.fields[3].count = 1;
  points.fields[3].datatype = sensor_msgs::PointField::FLOAT32;
  //points.is_bigendian = false; ???
  points.point_step = 16;
  points.row_step = points.point_step * points.width;
  points.data.resize (points.row_step * points.height);
  points.is_dense = false; // there may be invalid points
 
  float bad_point = std::numeric_limits<float>::quiet_NaN ();
  int i = 0;
  for (int32_t u = 0; u < dense_points_.rows; ++u) {
    for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
      if (isValidPoint(dense_points_(u,v))) {
        // x,y,z,rgba
        memcpy (&points.data[i * points.point_step + 0], &dense_points_(u,v)[0], sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &dense_points_(u,v)[1], sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &dense_points_(u,v)[2], sizeof (float));
      }
      else {
        memcpy (&points.data[i * points.point_step + 0], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &bad_point, sizeof (float));
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  i = 0;
  if (encoding == enc::MONO8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          memcpy (&points.data[i * points.point_step + 12], &rgb, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::RGB8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::BGR8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else {
    ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str());
  }
}
Example #16
int brightness(int pic[][][], int i, int j,int len1,int len2){
  if(!isValidPoint(pic,i,j,len1,len2)) return 0;
  return pic[i][j][0]+pic[i][j][1]+pic[i][j][2];
}
Example #17
void StereoProcessor::processPoints(const stereo_msgs::DisparityImage& disparity,
                                    const cv::Mat& color, const std::string& encoding,
                                    const image_geometry::StereoCameraModel& model,
                                    sensor_msgs::PointCloud& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in sparse point cloud message
  points.points.resize(0);
  points.channels.resize(3);
  points.channels[0].name = "rgb";
  points.channels[0].values.resize(0);
  points.channels[1].name = "u";
  points.channels[1].values.resize(0);
  points.channels[2].name = "v";
  points.channels[2].values.resize(0);
  
  for (int32_t u = 0; u < dense_points_.rows; ++u) {
    for (int32_t v = 0; v < dense_points_.cols; ++v) {
      if (isValidPoint(dense_points_(u,v))) {
        // x,y,z
        geometry_msgs::Point32 pt;
        pt.x = dense_points_(u,v)[0];
        pt.y = dense_points_(u,v)[1];
        pt.z = dense_points_(u,v)[2];
        points.points.push_back(pt);
        // u,v
        points.channels[1].values.push_back(u);
        points.channels[2].values.push_back(v);
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  points.channels[0].values.reserve(points.points.size());
  if (encoding == enc::MONO8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          points.channels[0].values.push_back(*(float*)(&rgb));
        }
      }
    }
  }
  else if (encoding == enc::RGB8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else if (encoding == enc::BGR8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else {
    ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str());
  }
}