Example #1
bool SmoothFilter::_filter(const FramePtr &in, FramePtr &out)
{
  ToFRawFrame *tofFrame = dynamic_cast<ToFRawFrame *>(in.get());
  DepthFrame *depthFrame = dynamic_cast<DepthFrame *>(in.get());
  
  if((!tofFrame && !depthFrame) || !_prepareOutput(in, out))
  {
    logger(LOG_ERROR) << "IIRFilter: Input frame type is not ToFRawFrame or DepthFrame or failed get the output ready" << std::endl;
    return false;
  }
  
  if(tofFrame)
  {
    _size = tofFrame->size;
    ToFRawFrame *o = dynamic_cast<ToFRawFrame *>(out.get());
    
    if(!o)
    {
      logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting ToFRawFrame." << std::endl;
      return false;
    }
    
    //logger(LOG_INFO) << "IIRFilter: Applying filter with gain = " << _gain << " to ToFRawFrame id = " << tofFrame->id << std::endl;
    
    uint s = _size.width*_size.height;
    memcpy(o->ambient(), tofFrame->ambient(), s*tofFrame->ambientWordWidth());
    memcpy(o->amplitude(), tofFrame->amplitude(), s*tofFrame->amplitudeWordWidth());
    memcpy(o->flags(), tofFrame->flags(), s*tofFrame->flagsWordWidth());
    
    if(tofFrame->phaseWordWidth() == 2)
      return _filter<uint16_t>((uint16_t *)tofFrame->phase(), (uint16_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 1)
      return _filter<uint8_t>((uint8_t *)tofFrame->phase(), (uint8_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 4)
      return _filter<uint32_t>((uint32_t *)tofFrame->phase(), (uint32_t *)o->phase());
    else
      return false;
  }
  else if(depthFrame)
  {
    _size = depthFrame->size;
    DepthFrame *o = dynamic_cast<DepthFrame *>(out.get());
    
    if(!o)
    {
      logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting DepthFrame." << std::endl;
      return false;
    }
    
    o->amplitude = depthFrame->amplitude;
    
    return _filter<float>(depthFrame->depth.data(), o->depth.data());
  }
  else
    return false;
}
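The function above only copies the side channels (ambient, amplitude, flags) and then dispatches on the phase word width to a templated per-pixel kernel, `_filter<T>`, which is not part of this example. Purely as a rough, hypothetical sketch of the kind of element-wise smoothing such a kernel could perform over a raw width-by-height buffer (the library's real kernel may differ):

// Hypothetical sketch only: a horizontal box smoothing pass over a row-major
// width x height buffer. Not the library's actual SmoothFilter::_filter<T>.
template <typename T>
bool smoothSketch(const T *in, T *out, int width, int height, int radius)
{
  if (!in || !out || width <= 0 || height <= 0 || radius < 0)
    return false;

  for (int y = 0; y < height; y++)
    for (int x = 0; x < width; x++)
    {
      double sum = 0;
      int count = 0;
      for (int dx = -radius; dx <= radius; dx++)
      {
        int xx = x + dx;
        if (xx >= 0 && xx < width)
        {
          sum += in[y * width + xx];
          count++;
        }
      }
      out[y * width + x] = static_cast<T>(sum / count);
    }
  return true;
}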
Example #2
void EffectMosaic::apply(FramePtr &src)
{

    for(int i=0; i < src->getHeight(); i+=mMosaicSize) {
        for(int j=0; j < src->getWidth(); j+=mMosaicSize) {
            auto pixel = AverageColor(src.get(), i, j);
            SetColor(src.get(), j, i, pixel);
        }
    }
}
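The mosaic loop relies on AverageColor and SetColor helpers that are not shown. As an illustration only, a block-averaging helper over a plain row-major 8-bit grayscale buffer (hypothetical name and layout; the real helpers operate on the Frame type and presumably handle multi-channel pixels) could look like this:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch: average one mosaic block of a grayscale image,
// clamping the block to the image borders.
static uint8_t averageBlockSketch(const std::vector<uint8_t> &img,
                                  int width, int height,
                                  int top, int left, int blockSize)
{
  unsigned sum = 0;
  unsigned count = 0;
  for (int y = top; y < top + blockSize && y < height; y++)
    for (int x = left; x < left + blockSize && x < width; x++)
    {
      sum += img[static_cast<std::size_t>(y) * width + x];
      count++;
    }
  return count ? static_cast<uint8_t>(sum / count) : 0;
}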
Example #3
void DepthFilter::initializeSeeds(FramePtr frame)
{

  list<PointFeat*> new_pt_features;
  list<LineFeat*> new_seg_features;

  if(Config::hasPoints())
  {
      /* detect new point features in point-unpopulated cells of the grid */
      // populate the occupancy grid of the detector with current features
      pt_feature_detector_->setExistingFeatures(frame->pt_fts_);
      // detect features to fill the free cells in the image
      pt_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                Config::triangMinCornerScore(), new_pt_features);
  }

  if(Config::hasLines())
  {
      /* detect new segment features in line-unpopulated cells of the grid */
      // populate the occupancy grid of the detector with current features
      seg_feature_detector_->setExistingFeatures(frame->seg_fts_);
      // detect features
      seg_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                Config::lsdMinLength(), new_seg_features);
  }

  // lock the parallel updating thread?
  seeds_updating_halt_ = true;
  lock_t lock(seeds_mut_); // by locking the updateSeeds function stops
  // increase by one the keyframe counter (to account for this new one)
  ++PointSeed::batch_counter;

  // initialize a point seed for every new point feature
  std::for_each(new_pt_features.begin(), new_pt_features.end(), [&](PointFeat* ftr){
    pt_seeds_.push_back(PointSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });

  // initialize a segment seed for every new segment feature
  std::for_each(new_seg_features.begin(), new_seg_features.end(), [&](LineFeat* ftr){
    seg_seeds_.push_back(LineSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });

  if(options_.verbose)
    SVO_INFO_STREAM("DepthFilter: Initialized "<<new_pt_features.size()<<" new point seeds and "
                    <<new_seg_features.size()<<" line segment seeds.");
  seeds_updating_halt_ = false;
}
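The PointSeed and LineSeed constructors used above are defined elsewhere. As a hypothetical sketch of the kind of state an SVO-style inverse-depth seed typically carries (a Beta prior on the inlier ratio plus a Gaussian on inverse depth), initialized from the scene's mean and minimum depth; the real classes may differ:

// Hypothetical sketch only; the actual PointSeed/LineSeed definitions
// belong to the library and may have different members.
struct SeedSketch
{
  float a;       // Beta distribution: pseudo-count of inlier measurements
  float b;       // Beta distribution: pseudo-count of outlier measurements
  float mu;      // mean of the inverse-depth estimate
  float z_range; // span of plausible inverse depths
  float sigma2;  // variance of the inverse-depth estimate

  SeedSketch(float depth_mean, float depth_min)
      : a(10.0f), b(10.0f),
        mu(1.0f / depth_mean),
        z_range(1.0f / depth_min),
        sigma2(z_range * z_range / 36.0f) {}
};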
Example #4
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");

  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;

  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;

  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");

  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }

  // Rescale the map such that the mean scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
      -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);

      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);

      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
Example #5
void DepthFilter::getSeedsCopy(const FramePtr& frame, std::list<Seed>& seeds)
{
  lock_t lock(seeds_mut_);
  for(std::list<Seed>::iterator it=seeds_.begin(); it!=seeds_.end(); ++it)
  {
    if (it->ftr->frame == frame.get())
      seeds.push_back(*it);
  }
}
Example #6
bool PointCloudFrameGenerator::generate(const FramePtr &in, FramePtr &out)
{
  const DepthFrame *depthFrame = dynamic_cast<const DepthFrame *>(in.get());
  
  if(!depthFrame)
  {
    logger(LOG_ERROR) << "PointCloudFrameGenerator: Only DepthFrame type is supported as input to generate() function." << std::endl;
    return false;
  }
  
  XYZIPointCloudFrame *f = dynamic_cast<XYZIPointCloudFrame *>(out.get());
  
  if(!f)
  {
    f = new XYZIPointCloudFrame();
    out = FramePtr(f);
  }
  
  f->id = depthFrame->id;
  f->timestamp = depthFrame->timestamp;
  f->points.resize(depthFrame->size.width*depthFrame->size.height);
  
  if(!_pointCloudTransform->depthToPointCloud(depthFrame->depth, *f))
  {
    logger(LOG_ERROR) << "DepthCamera: Could not convert depth frame to point cloud frame" << std::endl;
    return false;
  }
  
  // Setting amplitude as intensity
  auto index = 0;
  
  auto w = depthFrame->size.width;
  auto h = depthFrame->size.height;
  
  for(auto y = 0; y < h; y++)
    for(auto x = 0; x < w; x++, index++)
    {
      IntensityPoint &p = f->points[index];
      p.i = depthFrame->amplitude[index];
    }
    
  return true;
}
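A hypothetical usage sketch of the generator above, assuming the library headers that declare FramePtr, DepthFrame, XYZIPointCloudFrame and PointCloudFrameGenerator are already included and that a depth frame has been produced by the camera; the helper name is illustrative:

// Hypothetical usage sketch; not part of the library.
bool depthToCloud(PointCloudFrameGenerator &generator, const FramePtr &depthIn, FramePtr &cloudOut)
{
  if (!generator.generate(depthIn, cloudOut)) // allocates an XYZIPointCloudFrame if needed
    return false;

  XYZIPointCloudFrame *cloud = dynamic_cast<XYZIPointCloudFrame *>(cloudOut.get());
  if (!cloud)
    return false;

  // Each point now carries x, y, z from the depth-to-point-cloud transform and
  // an intensity (p.i) copied from the amplitude channel, as done in generate().
  return !cloud->points.empty();
}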
Example #7
bool Reprojector::reprojectCell(Cell& cell, FramePtr frame)
{
  cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
  Cell::iterator it=cell.begin();
  while(it!=cell.end())
  {
    ++n_trials_;

    if(it->pt->type_ == Point::TYPE_DELETED)
    {
      it = cell.erase(it);
      continue;
    }

    bool found_match = true;
    if(options_.find_match_direct)
      found_match = matcher_.findMatchDirect(*it->pt, *frame, it->px);
    if(!found_match)
    {
      it->pt->n_failed_reproj_++;
      if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_failed_reproj_ > 15)
        map_.safeDeletePoint(it->pt);
      if(it->pt->type_ == Point::TYPE_CANDIDATE  && it->pt->n_failed_reproj_ > 30)
        map_.point_candidates_.deleteCandidatePoint(it->pt);
      it = cell.erase(it);
      continue;
    }
    it->pt->n_succeeded_reproj_++;
    if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_succeeded_reproj_ > 10)
      it->pt->type_ = Point::TYPE_GOOD;

    Feature* new_feature = new Feature(frame.get(), it->px, matcher_.search_level_);
    frame->addFeature(new_feature);

    // Here we add a reference in the feature to the 3D point, the other way
    // round is only done if this frame is selected as keyframe.
    new_feature->point = it->pt;

    if(matcher_.ref_ftr_->type == Feature::EDGELET)
    {
      new_feature->type = Feature::EDGELET;
      new_feature->grad = matcher_.A_cur_ref_*matcher_.ref_ftr_->grad;
      new_feature->grad.normalize();
    }

    // If the keyframe is selected and we reproject the rest, we don't have to
    // check this point anymore.
    it = cell.erase(it);

    // Maximum one point per cell.
    return true;
  }
  return false;
}
Example #8
void DepthFilter::removeKeyframe(FramePtr frame)
{
  seeds_updating_halt_ = true;
  lock_t lock(seeds_mut_);
  list<Seed>::iterator it=seeds_.begin();
  size_t n_removed = 0;
  while(it!=seeds_.end())
  {
    if(it->ftr->frame == frame.get())
    {
      it = seeds_.erase(it);
      ++n_removed;
    }
    else
      ++it;
  }
  seeds_updating_halt_ = false;
}
Example #9
void DepthFilter::initializeSeeds(FramePtr frame)
{
  Features new_features;
  feature_detector_->setExistingFeatures(frame->fts_);
  feature_detector_->detect(frame.get(), frame->img_pyr_,
                            Config::triangMinCornerScore(), new_features);

  // initialize a seed for every new feature
  seeds_updating_halt_ = true;
  lock_t lock(seeds_mut_); // by locking the updateSeeds function stops
  ++Seed::batch_counter;
  std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
    seeds_.push_back(Seed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });

  if(options_.verbose)
    SVO_INFO_STREAM("DepthFilter: Initialized "<<new_features.size()<<" new seeds");
  seeds_updating_halt_ = false;
}
Example #10
void detectFeatures(
    FramePtr frame,
    vector<cv::Point2f>& px_vec,
    vector<Vector3d>& f_vec)
{
  Features new_features;
  feature_detection::FastDetector detector(
      frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels());
  detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features);

  // now for all maximum corners, initialize a new seed
  px_vec.clear(); px_vec.reserve(new_features.size());
  f_vec.clear(); f_vec.reserve(new_features.size());
  std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
    px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
    f_vec.push_back(ftr->f);
    delete ftr;
  });
}
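Downstream (see addSecondFrame in Example #4), the px_vec produced here is the kind of point list that seeds pyramidal KLT tracking into the next image. A minimal OpenCV sketch of that step, assuming the two pyramid base images are available as cv::Mat (window size and pyramid depth here are illustrative):

#include <opencv2/video/tracking.hpp>
#include <vector>

// Sketch only: track reference keypoints into the current image with
// pyramidal Lucas-Kanade, roughly what an SVO-style trackKlt does.
void trackIntoNextImageSketch(const cv::Mat &img_ref, const cv::Mat &img_cur,
                              std::vector<cv::Point2f> &px_ref,
                              std::vector<cv::Point2f> &px_cur)
{
  std::vector<unsigned char> status;
  std::vector<float> error;
  px_cur = px_ref; // use the reference positions as the initial guess
  cv::calcOpticalFlowPyrLK(img_ref, img_cur, px_ref, px_cur, status, error,
                           cv::Size(30, 30), 4,
                           cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.001),
                           cv::OPTFLOW_USE_INITIAL_FLOW);
}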
Example #11
	/// Detect FAST corners; the output is the corresponding points and their direction vectors (which can be regarded as the points' back-projected coordinates)
	void Initialization::detectFeatures(
		FramePtr frame,
		std::vector<cv::Point2f>& px_vec,
		std::vector<Vector3d>& f_vec)
	{
		Features new_features;
		FastDetector detector(
			frame->img().cols, frame->img().rows, 25, 3);
		detector.detect(frame.get(), frame->img_pyr_, 20.0, new_features);

		// Return the feature positions and the features' unit vectors
		px_vec.clear(); px_vec.reserve(new_features.size());
		f_vec.clear(); f_vec.reserve(new_features.size());
		std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
			px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
			f_vec.push_back(ftr->f);
			delete ftr;
		});
	}
Example #12
		void DisplayFrame(Frame* pFrame)
		{
			if(pFrame != NULL && pFrame->HasValidDataPtr()) {
				if(GetFrameManager()->Owns(*pFrame)) {
					DoRender(pFrame);
				}
				else {
					FramePtr pTempFrame = GetReservedFrame();
					if(pTempFrame && pFrame->GetDataSize() == pTempFrame->GetDataSize()) {
						utils::image::Copy(pTempFrame->GetDataPtr(), pFrame->GetDataPtr(), pTempFrame->GetDataSize());
						DoRender(pTempFrame.get());
					}
					else
						LOG << TEXT("DECKLINK: Failed to get reserved frame");
				}
			}
			else {
				LOG << TEXT("DECKLINK: Tried to render frame with no data");
			}
		}
Example #13
	InitResult Initialization::addSecondFrame(FramePtr frame_cur)
	{
		trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
		std::cout << "Init: KLT tracked " << disparities_.size() << " features" << std::endl;

		// Number of features that passed optical-flow (KLT) tracking
		if (disparities_.size() < 50)
			return FAILURE;

		// Median of the pixel disparities between the two frames after optical-flow tracking
		double disparity = getMedian(disparities_);
		std::cout << "Init: KLT " << disparity << "px average disparity." << std::endl;
		// If the median is smaller than the configured threshold, this frame is not a keyframe, i.e. at the start the two frames must not be too close together
		if (disparity < 50.0)
			return NO_KEYFRAME;
		// Compute the homography matrix
		computeHomography(
			f_ref_, f_cur_,
			frame_ref_->cam_->getFocalLength(), 2.0,
			inliers_, xyz_in_cur_, T_cur_from_ref_);
		std::cout << "Init: Homography RANSAC " << inliers_.size() << " inliers." << std::endl;
		// After computing the homography, use the number of inliers to decide whether tracking succeeded
		if (inliers_.size() < 40)
		{
			std::cerr << "Init WARNING: 40 inliers minimum required." << std::endl;
			return FAILURE;
		}

		// Using the homography, compute the 3D points formed by the features matched between the two frames, take their median depth, and convert to the specified scale
		std::vector<double> depth_vec;
		for (size_t i = 0; i < xyz_in_cur_.size(); ++i)
			depth_vec.push_back((xyz_in_cur_[i]).z());
		double scene_depth_median = getMedian(depth_vec);
		double scale = 1.0 / scene_depth_median;
		// Compute the relative SE3 transform
		frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;

		// Apply the scale to the translation
		frame_cur->T_f_w_.translation() =
			-frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
		
		// For each inlier create a 3D point, set up its features, and add them to both frames
		SE3 T_world_cur = frame_cur->T_f_w_.inverse();
		for (std::vector<int>::iterator it = inliers_.begin(); it != inliers_.end(); ++it)
		{
			Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
			Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
			if (frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
			{
				Vector3d pos = T_world_cur * (xyz_in_cur_[*it] * scale); // Transform the point from camera coordinates to world coordinates
				Point3D *new_point = new Point3D(pos);

				Feature* ftr_cur = new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0);
				frame_cur->addFeature(ftr_cur);
				// Store the features that reference the same point, so that when the point is deleted its corresponding features can be deleted as well
				new_point->addFrameRef(ftr_cur);

				Feature* ftr_ref = new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0);
				frame_ref_->addFeature(ftr_ref);
				new_point->addFrameRef(ftr_ref);
			}
		}
		return SUCCESS;
	}
Example #14
/**
* @brief 
*
* @return 
*/
int H264Encoder::run()
{
    // TODO - This section needs to be rewritten to read the configuration from the values saved
    // for the streams via the web gui
    AVDictionary *opts = NULL;
    //avSetH264Preset( &opts, "default" );
    //avSetH264Profile( &opts, "main" );
    //avDictSet( &opts, "level", "4.1" );
    avSetH264Preset( &opts, "ultrafast" );
    //avSetH264Profile( &opts, "baseline" );
    avDictSet( &opts, "level", "31" );
    avDictSet( &opts, "g", "24" );
    //avDictSet( &opts, "b", (int)mBitRate );
    //avDictSet( &opts, "bitrate", (int)mBitRate );
    //avDictSet( &opts, "crf", "24" );
    //avDictSet( &opts, "framerate", (double)mFrameRate );
    //avDictSet( &opts, "fps", (double)mFrameRate );
    //avDictSet( &opts, "r", (double)mFrameRate );
    //avDictSet( &opts, "timebase", "1/90000" );
    avDumpDict( opts );

    // Make sure ffmpeg is compiled with libx264 support
    AVCodec *codec = avcodec_find_encoder( CODEC_ID_H264 );
    if ( !codec )
        Fatal( "Can't find encoder codec" );

    mCodecContext = avcodec_alloc_context3( codec );

    mCodecContext->width = mWidth;
    mCodecContext->height = mHeight;
    //mCodecContext->time_base = TimeBase( 1, 90000 );
    mCodecContext->time_base = mFrameRate.timeBase();
    mCodecContext->bit_rate = mBitRate;
    mCodecContext->pix_fmt = mPixelFormat;

    mCodecContext->gop_size = 24;
    //mCodecContext->max_b_frames = 1;

    Debug( 2, "Time base = %d/%d", mCodecContext->time_base.num, mCodecContext->time_base.den );
    Debug( 2, "Pix fmt = %d", mCodecContext->pix_fmt );

    /* open it */
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open encoder codec" );

    avDumpDict( opts );
    AVFrame *inputFrame = avcodec_alloc_frame();

    Info( "%s:Waiting", cidentity() );
    if ( waitForProviders() )
    {
        Info( "%s:Waited", cidentity() );

        // Find the source codec context
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat inputPixelFormat = videoProvider()->pixelFormat();
        //FrameRate inputFrameRate = videoProvider()->frameRate();
        //Info( "CONVERT: %d-%dx%d -> %d-%dx%d",
            //inputPixelFormat, inputWidth, inputHeight,
            //mPixelFormat, mWidth, mHeight
        //);

        // Make space for anything that is going to be output
        AVFrame *outputFrame = avcodec_alloc_frame();
        ByteBuffer outputBuffer;
        outputBuffer.size( avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height ) );
        avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );

        // Prepare for image format and size conversions
        struct SwsContext *convertContext = sws_getContext( inputWidth, inputHeight, inputPixelFormat, mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
        if ( !convertContext )
            Fatal( "Unable to create conversion context for encoder" );

        int outSize = 0;
        uint64_t timeInterval = mFrameRate.intervalUsec();
        uint64_t currTime = time64();
        uint64_t nextTime = currTime;
        //outputFrame->pts = currTime;
        outputFrame->pts = 0;
        uint32_t ptsInterval = 90000/mFrameRate.toInt();
        //uint32_t ptsInterval = mFrameRate.intervalPTS( mCodecContext->time_base );
        while ( !mStop )
        {
            // Synchronise the output with the desired output frame rate
            while ( currTime < nextTime )
            {
                currTime = time64();
                usleep( 1000 );
            }
            nextTime += timeInterval;

            FramePtr framePtr;
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                if ( mInitialFrame.empty() || !mConsumers.empty() )
                {
                    FrameQueue::iterator iter = mFrameQueue.begin();
                    framePtr = *iter;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();

            if ( framePtr.get() )
            {
                const FeedFrame *frame = framePtr.get();
                const VideoFrame *inputVideoFrame = dynamic_cast<const VideoFrame *>(frame);

                //Info( "Provider: %s, Source: %s, Frame: %d", inputVideoFrame->provider()->cidentity(), inputVideoFrame->originator()->cidentity(), inputVideoFrame->id() );
                //Info( "PF:%d @ %dx%d", inputVideoFrame->pixelFormat(), inputVideoFrame->width(), inputVideoFrame->height() );

                avpicture_fill( (AVPicture *)inputFrame, inputVideoFrame->buffer().data(), inputPixelFormat, inputWidth, inputHeight );

                //outputFrame->pts = currTime;
                //Debug( 5, "PTS %jd", outputFrame->pts );
       
                // Reformat the input frame to fit the desired output format
                //Info( "SCALE: %d -> %d", int(inputFrame->data[0])%16, int(outputFrame->data[0])%16 );
                if ( sws_scale( convertContext, inputFrame->data, inputFrame->linesize, 0, inputHeight, outputFrame->data, outputFrame->linesize ) < 0 )
                    Fatal( "Unable to convert input frame (%d@%dx%d) to output frame (%d@%dx%d) at frame %ju", inputPixelFormat, inputWidth, inputHeight, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, mFrameCount );

                // Encode the image
                outSize = avcodec_encode_video( mCodecContext, outputBuffer.data(), outputBuffer.capacity(), outputFrame );
                Debug( 5, "Encoding reports %d bytes", outSize );
                if ( outSize > 0 )
                {
                    //Info( "CPTS: %jd", mCodecContext->coded_frame->pts );
                    outputBuffer.size( outSize );
                    //Debug( 5, "PTS2 %jd", mCodecContext->coded_frame->pts );
                    if ( mInitialFrame.empty() )
                    {
                        Debug( 3, "Looking for H.264 stream info" );
                        const uint8_t *startPos = outputBuffer.head();
                        startPos = h264StartCode( startPos, outputBuffer.tail() );
                        while ( startPos < outputBuffer.tail() )
                        {
                            while( !*(startPos++) )
                                ;
                            const uint8_t *nextStartPos = h264StartCode( startPos, outputBuffer.tail() );

                            int frameSize = nextStartPos-startPos;

                            unsigned char type = startPos[0] & 0x1F;
                            unsigned char nri = startPos[0] & 0x60;
                            Debug( 1, "Type %d, NRI %d (%02x)", type, nri>>5, startPos[0] );

                            if ( type == NAL_SEI )
                            {
                                // SEI
                                mSei.assign( startPos, frameSize );
                            }
                            else if ( type == NAL_SPS )
                            {
                                // SPS
                                Hexdump( 2, startPos, frameSize );
                                mSps.assign( startPos, frameSize );

                                if ( frameSize < 4 )
                                    Panic( "H.264 NAL type 7 frame too short (%d bytes) to extract level/profile", frameSize );
                                mAvcLevel = startPos[3];
                                mAvcProfile = startPos[1];
                                Debug( 2, "Got AVC level %d, profile %d", mAvcLevel, mAvcProfile );
                            }
                            else if ( type == NAL_PPS )
                            {
                                // PPS
                                Hexdump( 2, startPos, frameSize );
                                mPps.assign( startPos, frameSize );
                            }
                            startPos = nextStartPos;
                        }
                        mInitialFrame = outputBuffer;
                        //VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, mInitialFrame );
                    }
                    else
                    {
                        //av_rescale_q(cocontext->coded_frame->pts, cocontext->time_base, videostm->time_base); 
                        VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, outputBuffer );
                        distributeFrame( FramePtr( outputVideoFrame ) );
                    }
                }
                outputFrame->pts += ptsInterval;   ///< FIXME - This can't be right, but it works...
            }
Example #15
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur, Matrix3d orient, Vector3d pos)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");

  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;

  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;

  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");

  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }

  // Transformation in real world
  T_cur_frame_real_scale = SE3(orient, pos);
  Vector3d trans = T_cur_frame_real_scale.translation() - T_first_frame_real_scale.translation();
  double length_real = sqrt(pow(trans[0],2) + pow(trans[1],2) + pow(trans[2],2));

  SVO_INFO_STREAM("Real world transform x: " << trans[0] << " y:" << trans[1] << " z:" << trans[2] << " length:" << length_real);

  double x = T_cur_from_ref_.translation()[0];
  double y = T_cur_from_ref_.translation()[1];
  double z = T_cur_from_ref_.translation()[2];
  double length_svo = sqrt(pow(x,2) + pow(y,2) + pow(z,2));

  SVO_INFO_STREAM("SVO transform x: " << x << " y:" << y << " z:" << z << " length:" << length_svo);

#ifdef USE_ASE_IMU
  // Rescale the map such that the real length of the movement matches with the svo movement length
  double scale =length_real / length_svo;
#else
  // Rescale the map such that the mean scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
#endif
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
     -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  //frame_cur->T_f_w_ = T_cur_frame_real_scale;
  //frame_ref_->T_f_w_ = T_first_frame_real_scale;

//  // Rescale the map such that the mean scene depth is equal to the specified scale
//  vector<double> depth_vec;
//  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
//    depth_vec.push_back((xyz_in_cur_[i]).z());
//  double scene_depth_median = vk::getMedian(depth_vec);
//  double scale = Config::mapScale()/scene_depth_median;
//  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
//  frame_cur->T_f_w_.translation() =
//      -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);

      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);

      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
Example #16
bool NotifyOutput::processFrame( FramePtr frame )
{
    Info( "Got frame" );

    const NotifyFrame *notifyFrame = dynamic_cast<const NotifyFrame *>(frame.get());

    // Anything else we ignore
    if ( notifyFrame )
    {
        const DiskIONotification *diskNotifyFrame = dynamic_cast<const DiskIONotification *>(notifyFrame);
        if ( diskNotifyFrame )
        {
            printf( "Got disk I/O notification frame\n" );
            const DiskIONotification::DiskIODetail &detail = diskNotifyFrame->detail();
            std::string typeString;
            switch( detail.type() )
            {
                case DiskIONotification::DiskIODetail::READ :
                {
                    typeString = "read";
                    break;
                }
                case DiskIONotification::DiskIODetail::WRITE :
                {
                    typeString = "write";
                    break;
                }
                case DiskIONotification::DiskIODetail::APPEND :
                {
                    typeString = "append";
                    break;
                }
            }
            switch ( detail.stage() )
            {
                case DiskIONotification::DiskIODetail::BEGIN :
                {
                    printf( "  DiskIO %s starting for file %s\n", typeString.c_str(), detail.name().c_str() );
                    break;
                }
                case DiskIONotification::DiskIODetail::PROGRESS :
                {
                    printf( "  DiskIO %s in progress for file %s\n", typeString.c_str(), detail.name().c_str() );
                    break;
                }
                case DiskIONotification::DiskIODetail::COMPLETE :
                {
                    printf( "  DiskIO %s completed for file %s, %ju bytes, result %d\n", typeString.c_str(), detail.name().c_str(), detail.size(), detail.result() );
                    break;
                }
            }
        }
        const EventNotification *eventNotifyFrame = dynamic_cast<const EventNotification *>(notifyFrame);
        if ( eventNotifyFrame )
        {
            printf( "Got event notification frame\n" );
            const EventNotification::EventDetail &detail = eventNotifyFrame->detail();
            switch ( detail.stage() )
            {
                case EventNotification::EventDetail::BEGIN :
                {
                    printf( "  Event %ju starting\n", detail.id() );
                    break;
                }
                case EventNotification::EventDetail::PROGRESS :
                {
                    printf( "  Event %ju in progress\n", detail.id() );
                    break;
                }
                case EventNotification::EventDetail::COMPLETE :
                {
                    printf( "  Event %ju completed, %.2lf seconds long\n", detail.id(), detail.length() );
                    break;
                }
            }
        }
    }
    return( true );
}
Example #17
void FramePlaybackControl::DoRender(HANDLE& handle, bool bPureCG) 
{
	//Get next CG-frame if we have a CGProducer
	FramePtr pCGFrame;
	if(pCGProducer_) {
		pCGFrame = pCGProducer_->GetFrameBuffer().front();
		FrameBufferFetchResult fetchResult = pCGProducer_->GetFrameBuffer().pop_front();
		if(pCGFrame != 0) {
			pLastCGFrame_ = pCGFrame;
		}
		else if(fetchResult != FetchEOF) {
			pCGFrame = pLastCGFrame_;
		}
		else {
			pCGProducer_.reset();
			LOG << LogLevel::Debug << TEXT("Frameplayback: Cleared CGProducer");
			OnCGEmpty();
			pLastCGFrame_.reset();
		}
	}

	//Get next video frame unless we're in PureCG-mode
	FramePtr pVideoFrame;
	if(!bPureCG || activeClip_.bStopped_) {
		if(frameQueue_.size() > 0) {
			pVideoFrame = frameQueue_.front();
			frameQueue_.pop();

			if(pVideoFrame != 0) {
				pLastVideoFrame_ = pVideoFrame;
			}
		}
		else {
			pVideoFrame = pLastVideoFrame_;
		}

		if(activeClip_.bStopped_ && !frameQueue_.empty())
			eventRender_.Set();
	}
	else {
		pVideoFrame = pLastVideoFrame_;
	}

	//combine and send to consumer
	FramePtr pResultFrame;
	if(pCGFrame) {
		if(pVideoFrame && this->activeClip_.pFP_ != this->emptyProducer_) {
			pResultFrame = pStrategy_->GetReservedFrame();
			if(pResultFrame) {
				utils::image::PreOver(pResultFrame->GetDataPtr(), pVideoFrame->GetDataPtr(), pCGFrame->GetDataPtr(), pResultFrame->GetDataSize());
				pResultFrame->GetAudioData().insert(pResultFrame->GetAudioData().end(), pVideoFrame->GetAudioData().begin(), pVideoFrame->GetAudioData().end());
				pResultFrame->GetAudioData().insert(pResultFrame->GetAudioData().end(), pCGFrame->GetAudioData().begin(), pCGFrame->GetAudioData().end());
			}
		}
		else
			pResultFrame = pCGFrame;
	}
	else
		pResultFrame = pVideoFrame;

	if(pResultFrame)
		pStrategy_->DisplayFrame(pResultFrame.get());
	else if(bPureCG) {
		pResultFrame = pStrategy_->GetReservedFrame();
		if(pResultFrame) {
			utils::image::Clear(pResultFrame->GetDataPtr(), pResultFrame->GetDataSize());
			pStrategy_->DisplayFrame(pResultFrame.get());
		}
	}
}
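The PreOver call above composites the CG layer over the video layer. Purely as an illustration (the real CasparCG routine lives elsewhere and is presumably SIMD-optimized), a per-pixel sketch of a premultiplied-alpha "over" blend on 8-bit BGRA data could look like this:

#include <cstddef>
#include <cstdint>

// Hypothetical sketch of a premultiplied-alpha "over" composite: the top (CG)
// layer is assumed premultiplied by its alpha, so result = top + bottom * (1 - alpha).
static void preOverSketch(uint8_t *result, const uint8_t *bottom, const uint8_t *top, std::size_t bytes)
{
  for (std::size_t i = 0; i + 3 < bytes; i += 4)
  {
    const unsigned alpha = top[i + 3];
    for (std::size_t c = 0; c < 4; c++)
    {
      unsigned blended = top[i + c] + (bottom[i + c] * (255u - alpha) + 127u) / 255u;
      result[i + c] = static_cast<uint8_t>(blended > 255u ? 255u : blended);
    }
  }
}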
Example #18
/**
* @brief 
*
* @param frame
* @param 
*
* @return 
*/
bool FeedProvider::dataFramesOnly( const FramePtr &frame, const FeedConsumer * )
{
    const DataFrame *dataFrame = dynamic_cast<const DataFrame *>(frame.get());
    return( dataFrame != NULL );
}
Example #19
/**
* @brief 
*
* @param frame
* @param 
*
* @return 
*/
bool FeedProvider::audioFramesOnly( const FramePtr &frame, const FeedConsumer * )
{
    const AudioFrame *audioFrame = dynamic_cast<const AudioFrame *>(frame.get());
    return( audioFrame != NULL );
}
Example #20
USING_YJJ_NAMESPACE

/**
 * entrance for journal tool
 */
int main(int argc, const char* argv[])
{
    options_description desc{"Options"};
    string format("%Y%m%d-%H:%M:%S");
    desc.add_options()
            ("help,h", "Help screen")
            ("folder,f", value<string>(), "Journal Folder")
            ("name,n", value<string>(), "Journal Name")
            ("page,p", "Just Page Header")
            ("verify,v", "Verify hash code")
            ("keep,k", "keep listening")
            ("time,t", "time visualized")
            ("detail,d", "data details")
            ("current,c", "start from current")
            ("length,l", value<int>()->default_value(20), "Char num of frame data to print")
            ("msgtype,m", value<string>()->default_value(""), "Message type printed, -eg: -m 10,11")
            ("rmsgtype,r", value<string>()->default_value(""), "Message type not printed, -eg: -r 10,11")
            ("start_time,s", value<string>()->default_value("20000101-13:30:00"), "start time (%Y%m%d-%H:%M:%S)")
            ("end_time,e", value<string>()->default_value("20200101-00:00:00"), "end time (%Y%m%d-%H:%M:%S)");

    variables_map vm;
    store(parse_command_line(argc, argv, desc), vm);
    notify(vm);

    if (vm.count("help"))
    {
        std::cout << desc << '\n';
        std::cout << "----frame tags----" << std::endl;
        std::cout << " (st): status" << std::endl;
        std::cout << " (so): source" << std::endl;
        std::cout << " (na): nano time" << std::endl;
        std::cout << " (en): extra nano time" << std::endl;
        std::cout << " (vn): visualized nano (-t only)" << std::endl;
        std::cout << " (fl): frame length" << std::endl;
        std::cout << " (dl): data length" << std::endl;
        std::cout << " (hl): header length" << std::endl;
        std::cout << " (hs): hash code" << std::endl;
        std::cout << " (mt): msg type" << std::endl;
        std::cout << " (lf): last flag" << std::endl;
        std::cout << " (id): request id" << std::endl;
        std::cout << " (er): error id" << std::endl;
        std::cout << " (em): error message" << std::endl;
        std::cout << " (cn): data content" << std::endl;
        std::cout << std::endl;
        std::cout << "----page tags----" << std::endl;
        std::cout << " (ST): page status" << std::endl;
        std::cout << " (JN): journal name" << std::endl;
        std::cout << " (PN): page number" << std::endl;
        std::cout << " (FN): total number of frame in this page" << std::endl;
        std::cout << " (LP): last position" << std::endl;
        std::cout << " (FV): frame version" << std::endl;
        std::cout << " (SN): start nano" << std::endl;
        std::cout << " (CN): close nano" << std::endl;
        return 0;
    }
    string start_time = vm.count("current") ? parseNano(getNanoTime(), format.c_str()) : vm["start_time"].as<string>();
    if (vm.count("start_time"))
    {
        std::cout << "StartTime:\t" << start_time << std::endl;
    }
    if (vm.count("end_time"))
    {
        std::cout << "EndTime:\t" << vm["end_time"].as<string>() << std::endl;
    }
    if (vm.count("folder"))
    {
        std::cout << "Folder:\t" << vm["folder"].as<string>() << std::endl;
    }
    if (vm.count("name"))
    {
        std::cout << "ShortName:\t" << vm["name"].as<string>() << std::endl;
    }

    string folder = vm["folder"].as<string>();
    string jname = vm["name"].as<string>();

    if (vm.count("page"))
    {
        vector<short> pageNums = PageUtil::GetPageNums(folder, jname);

        for (size_t idx = 0; idx < pageNums.size(); idx++)
        {
            PageHeader header = PageUtil::GetPageHeader(folder, jname, pageNums[idx]);
            std::cout << "[" << pageNums[idx] << "]"
                      << " (ST)" << (short)header.status
                      << " (JN)" << header.journal_name
                      << " (PN)" << header.page_num
                      << " (FN)" << header.frame_num
                      << " (LP)" << header.last_pos
                      << " (FV)" << header.frame_version
                      << " (SN)" << header.start_nano << "[" << parseNano(header.start_nano, format.c_str()) << "]"
                      << " (CN)" << header.close_nano << "[" << parseNano(header.close_nano, format.c_str()) << "]";
            std::cout << std::endl;
        }
    }
    else
    {
        bool toVerify = vm.count("verify");
        bool toTimeVisual = vm.count("time");
        bool needDetail = vm.count("detail");
        long sn = parseTime(start_time.c_str(), format.c_str());
        long en = parseTime(vm["end_time"].as<string>().c_str(), format.c_str());
        int dataLengthToPrint = vm["length"].as<int>();

        map<short, bool> doMsgTypes, nonMsgTypes;
        string doMtStr = vm["msgtype"].as<string>();
        bool mtDoList = doMtStr.length() > 0;
        if (mtDoList)
        {
            std::stringstream ss(doMtStr);
            int i;
            while (ss >> i)
            {
                doMsgTypes[i] = true;
                if (ss.peek() == ',')
                    ss.ignore();
            }
            std::cout << "MsgType to Print:";
            for (auto& item: doMsgTypes)
            {
                std::cout << " " << item.first;
            }
            std::cout << std::endl;
        }
        string nonMtStr = vm["rmsgtype"].as<string>();
        bool mtNonList = nonMtStr.length() > 0;
        if (mtNonList)
        {
            std::stringstream ss(nonMtStr);
            int i;
            while (ss >> i)
            {
                nonMsgTypes[i] = true;
                if (ss.peek() == ',')
                {
                    ss.ignore();
                }
            }
            std::cout << "MsgType to Skip:";
            for (auto& item: nonMsgTypes)
            {
                std::cout << " " << item.first;
            }
            std::cout << std::endl;
        }
        JournalReaderPtr reader = JournalReader::create(folder, jname, sn, "JournalPrinter");
        int i = 0;
        do
        {
            FramePtr frame = reader->getNextFrame();
            while (frame.get() != nullptr && en > frame->getNano())
            {
                short msgType = frame->getMsgType();
                bool toPrint = true;
                if ((mtDoList && doMsgTypes.find(msgType) == doMsgTypes.end())
                    || (mtNonList && nonMsgTypes.find(msgType) != nonMsgTypes.end()))
                {
                    toPrint = false;
                }
                if (toPrint)
                {
                    std::cout << "[" << i++ << "]"
                              << " (st)" << (short)frame->getStatus()
                              << " (so)" << frame->getSource()
                              << " (na)" << frame->getNano()
                              << " (en)" << frame->getExtraNano();
                    if (toTimeVisual)
                    {
                        std::cout << " (vn)" << parseNano(frame->getNano(), format.c_str());
                    }
                    std::cout << " (fl)" << frame->getFrameLength()
                              << " (dl)" << frame->getDataLength()
                              << " (hl)" << frame->getHeaderLength()
                              << " (hs)" << frame->getHashCode()
                              << " (mt)" << frame->getMsgType()
                              << " (lf)" << (short)frame->getLastFlag()
                              << " (id)" << frame->getRequestId()
                              << " (er)" << frame->getErrorId();
                    if (frame->getErrorMsg() != nullptr)
                    {
                        std::cout << " (em)" << frame->getErrorMsg();
                    }
                    if (dataLengthToPrint > 0)
                    {
                        std::cout << " (cn)" << string((char*)frame->getData(), std::min(dataLengthToPrint, frame->getDataLength()));
                    }
                    if (toVerify)
                    {
                        FH_TYPE_HASHNM hash = MurmurHash2(frame->getData(), frame->getDataLength(), HASH_SEED);
                        if (hash != frame->getHashCode())
                        {
                            std::cerr << std::endl << std::endl
                                      << "Hash Code mismatch: "
                                      << "[frame] " << frame->getHashCode()
                                      << "  [actual] " << hash << std::endl;
                            return -1;
                        }
                        else
                        {
                            std::cout << " (*)";
                        }
                    }
                    std::cout << std::endl;
                    if (needDetail)
                    {
                        printData(frame->getData(), msgType);
                    }
                }
                frame = reader->getNextFrame();
            }
        }
        while (vm.count("keep"));
    }
    return 0;
}