Example #1
void Visualizer::visualizeMarkers(
    const FramePtr& frame,
    const set<FramePtr>& core_kfs,
    const Map& map)
{
  if(frame == NULL)
    return;

  vk::output_helper::publishTfTransform(
      frame->T_f_w_*T_world_from_vision_.inverse(),
      ros::Time(frame->timestamp_), "cam_pos", "worldNED", br_);

  if(pub_frames_.getNumSubscribers() > 0 || pub_points_.getNumSubscribers() > 0)
  {
    vk::output_helper::publishHexacopterMarker(
        pub_frames_, "cam_pos", "cams", ros::Time(frame->timestamp_),
        1, 0, 0.3*VIS_SCALE, Vector3d(0.,0.,1.));
    vk::output_helper::publishPointMarker(
        pub_points_, T_world_from_vision_*frame->pos(), "trajectory",
        ros::Time::now(), trace_id_, 0, 0.006*VIS_SCALE, Vector3d(0.,0.,0.5));
    if(frame->isKeyframe() || publish_map_every_frame_)
      publishMapRegion(core_kfs);
    removeDeletedPts(map);
  }
}
Example #2
/**
* @brief Sends one JPEG frame as a single part of an MJPEG multipart stream
*
* @param writeable list of sockets currently ready for writing
* @param frame the encoded JPEG frame to transmit
*
* @return false if the write was incomplete and streaming should stop, true otherwise
*/
bool HttpImageStream::sendFrame( Select::CommsList &writeable, FramePtr frame )
{
    const ByteBuffer &packet = frame->buffer();

    std::string txHeaders = "--imgboundary\r\n";
    txHeaders += "Content-Type: image/jpeg\r\n";
    txHeaders += stringtf( "Content-Length: %zd\r\n\r\n", packet.size() );

    ByteBuffer txBuffer;
    txBuffer.reserve( packet.size()+128 );
    txBuffer.append( txHeaders.c_str(), txHeaders.size() );
    txBuffer.append( packet.data(), packet.size() );
    txBuffer.append( "\r\n", 2 );

    for ( Select::CommsList::iterator iter = writeable.begin(); iter != writeable.end(); iter++ )
    {
        if ( TcpInetSocket *socket = dynamic_cast<TcpInetSocket *>(*iter) )
        {
            if ( socket == mConnection->socket() )
            {
                int nBytes = socket->write( txBuffer.data(), txBuffer.size() );
                const FeedFrame *sourceFrame = frame->sourceFrame();
                Debug( 4, "Wrote %d bytes on sd %d, frame %ju<-%ju", nBytes, socket->getWriteDesc(), frame->id(), sourceFrame->id() );
                if ( nBytes != (int)txBuffer.size() )
                {
                    Error( "Incomplete write, %d bytes instead of %zd", nBytes, txBuffer.size() );
                    mStop = true;
                    return( false );
                }
            }
        }
    }
    return( true );
}
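The per-frame headers above only render as motion JPEG if the client was first primed with a multipart response. A minimal sketch of that one-off response header, assuming a boundary token matching the "--imgboundary" marker written before every frame (this helper is illustrative, not part of the original class):

#include <string>

// Hypothetical helper: the one-off HTTP response header that primes an
// MJPEG (multipart/x-mixed-replace) stream. The boundary declared here
// must match the "--imgboundary" marker used per frame in sendFrame().
std::string mjpegResponseHeader()
{
    return "HTTP/1.0 200 OK\r\n"
           "Content-Type: multipart/x-mixed-replace; boundary=imgboundary\r\n"
           "\r\n";
}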
Example #3
void DepthFilter::updateSeedsLoop()
{
  while(!boost::this_thread::interruption_requested())
  {
    FramePtr frame;
    {
      lock_t lock(frame_queue_mut_);
      while(frame_queue_.empty() && new_keyframe_set_ == false)
        frame_queue_cond_.wait(lock);
      if(new_keyframe_set_)
      {
        new_keyframe_set_ = false;
        seeds_updating_halt_ = false;
        clearFrameQueue();
        frame = new_keyframe_;
      }
      else
      {
        frame = frame_queue_.front();
        frame_queue_.pop();
      }
    }
    updateSeeds(frame);
    if(frame->isKeyframe())
      initializeSeeds(frame);
  }
}
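The loop above blocks on frame_queue_cond_ until a producer pushes work. A simplified sketch of that counterpart, assuming the same mutex and condition variable guard the queue (the method name addFrame and its body are illustrative):

// Hypothetical producer counterpart to updateSeedsLoop(): enqueue a frame
// under the queue mutex, then wake the waiting seed-update thread.
void DepthFilter::addFrame(FramePtr frame)
{
  {
    lock_t lock(frame_queue_mut_);
    frame_queue_.push(frame);
  }
  frame_queue_cond_.notify_one();
}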
Example #4
void RenderThread::UpdateLayout()
{
	QMutexLocker locker(&m_MutexLayout);

	// Find frames without layout info and delete them
	QMutableListIterator<FramePtr> i(m_RenderFrames);
	while (i.hasNext()) 
	{
		FramePtr renderFrame = i.next();
		if (!renderFrame)
		{
			i.remove();
			continue;
		}

		int j = m_ViewIDs.indexOf(renderFrame->Info(VIEW_ID).toUInt());
		if (j == -1)
		{
			m_Renderer->Deallocate(renderFrame);
			i.remove();
		}else
		{
			QRect srcRect = m_SrcRects.at(j);
			float scaleX = renderFrame->Info(RENDER_SRC_SCALE_X).toFloat();
			float scaleY = renderFrame->Info(RENDER_SRC_SCALE_Y).toFloat();
			srcRect.setLeft((int)(srcRect.left()*scaleX));
			srcRect.setRight((int)(srcRect.right()*scaleX));
			srcRect.setTop((int)(srcRect.top()*scaleY));
			srcRect.setBottom((int)(srcRect.bottom()*scaleY));

			renderFrame->SetInfo(SRC_RECT, srcRect);
			renderFrame->SetInfo(DST_RECT, m_DstRects.at(j));
		}
	}
}
Example #5
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");

  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;

  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;

  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");

  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }

  // Rescale the map such that the median scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
      -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);

      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);

      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
Example #6
bool SmoothFilter::_filter(const FramePtr &in, FramePtr &out)
{
  ToFRawFrame *tofFrame = dynamic_cast<ToFRawFrame *>(in.get());
  DepthFrame *depthFrame = dynamic_cast<DepthFrame *>(in.get());
  
  if((!tofFrame && !depthFrame) || !_prepareOutput(in, out))
  {
    logger(LOG_ERROR) << "IIRFilter: Input frame type is not ToFRawFrame or DepthFrame or failed get the output ready" << std::endl;
    return false;
  }
  
  if(tofFrame)
  {
    _size = tofFrame->size;
    ToFRawFrame *o = dynamic_cast<ToFRawFrame *>(out.get());
    
    if(!o)
    {
      logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting ToFRawFrame." << std::endl;
      return false;
    }
    
    //logger(LOG_INFO) << "SmoothFilter: Applying filter with gain = " << _gain << " to ToFRawFrame id = " << tofFrame->id << std::endl;
    
    uint s = _size.width*_size.height;
    memcpy(o->ambient(), tofFrame->ambient(), s*tofFrame->ambientWordWidth());
    memcpy(o->amplitude(), tofFrame->amplitude(), s*tofFrame->amplitudeWordWidth());
    memcpy(o->flags(), tofFrame->flags(), s*tofFrame->flagsWordWidth());
    
    if(tofFrame->phaseWordWidth() == 2)
      return _filter<uint16_t>((uint16_t *)tofFrame->phase(), (uint16_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 1)
      return _filter<uint8_t>((uint8_t *)tofFrame->phase(), (uint8_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 4)
      return _filter<uint32_t>((uint32_t *)tofFrame->phase(), (uint32_t *)o->phase());
    else
      return false;
  }
  else if(depthFrame)
  {
    _size = depthFrame->size;
    DepthFrame *o = dynamic_cast<DepthFrame *>(out.get());
    
    if(!o)
    {
      logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting DepthFrame." << std::endl;
      return false;
    }
    
    o->amplitude = depthFrame->amplitude;
    
    return _filter<float>(depthFrame->depth.data(), o->depth.data());
  }
  else
    return false;
}
Example #7
void EffectMosaic::apply(FramePtr &src)
{
    // Step through the image in mMosaicSize blocks; i indexes rows, j columns.
    for(int i=0; i < src->getHeight(); i+=mMosaicSize) {
        for(int j=0; j < src->getWidth(); j+=mMosaicSize) {
            auto pixel = AverageColor(src.get(), i, j);
            SetColor(src.get(), j, i, pixel);
        }
    }
}
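AverageColor and SetColor are not shown in this example; for a mosaic, the first presumably averages one mMosaicSize×mMosaicSize block and the second writes that average back over the block. A self-contained sketch of the averaging step on a packed 8-bit RGB buffer (all names and the pixel layout here are assumptions):

#include <cstdint>
#include <cstddef>

struct RGB { uint8_t r, g, b; };

// Hypothetical block average: mean color of the size x size block whose
// top-left corner is (x, y), clamped to the image bounds.
RGB averageBlock(const uint8_t* img, int width, int height,
                 int x, int y, int size)
{
    unsigned long r = 0, g = 0, b = 0, n = 0;
    for (int row = y; row < y + size && row < height; ++row)
        for (int col = x; col < x + size && col < width; ++col)
        {
            const uint8_t* px = img + 3 * (row * width + col);
            r += px[0]; g += px[1]; b += px[2]; ++n;
        }
    return RGB{ uint8_t(r / n), uint8_t(g / n), uint8_t(b / n) };
}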
Example #8
FramePtr StaticFrame::frame() const
{
    FramePtr p = boost::make_shared<Frame>();
    p->geometry = geometry;
    p->geometry.scale(scale);
    for (unsigned int i=0; i < data.size(); ++i) {
        p->addPoint(data[i].point());
    }
    return p;
}
Example #9
bool Reprojector::reprojectCell(Cell& cell, FramePtr frame)
{
  cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
  Cell::iterator it=cell.begin();
  while(it!=cell.end())
  {
    ++n_trials_;

    if(it->pt->type_ == Point::TYPE_DELETED)
    {
      it = cell.erase(it);
      continue;
    }

    bool found_match = true;
    if(options_.find_match_direct)
      found_match = matcher_.findMatchDirect(*it->pt, *frame, it->px);
    if(!found_match)
    {
      it->pt->n_failed_reproj_++;
      if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_failed_reproj_ > 15)
        map_.safeDeletePoint(it->pt);
      if(it->pt->type_ == Point::TYPE_CANDIDATE  && it->pt->n_failed_reproj_ > 30)
        map_.point_candidates_.deleteCandidatePoint(it->pt);
      it = cell.erase(it);
      continue;
    }
    it->pt->n_succeeded_reproj_++;
    if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_succeeded_reproj_ > 10)
      it->pt->type_ = Point::TYPE_GOOD;

    Feature* new_feature = new Feature(frame.get(), it->px, matcher_.search_level_);
    frame->addFeature(new_feature);

    // Here we add a reference in the feature to the 3D point, the other way
    // round is only done if this frame is selected as keyframe.
    new_feature->point = it->pt;

    if(matcher_.ref_ftr_->type == Feature::EDGELET)
    {
      new_feature->type = Feature::EDGELET;
      new_feature->grad = matcher_.A_cur_ref_*matcher_.ref_ftr_->grad;
      new_feature->grad.normalize();
    }

    // If the keyframe is selected and we reproject the rest, we don't have to
    // check this point anymore.
    it = cell.erase(it);

    // Maximum one point per cell.
    return true;
  }
  return false;
}
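pointQualityComparator itself is not shown here; since the loop stops at the first candidate that matches, the sort must put the most trustworthy points first. A plausible comparator, assuming Point's type enum ranks TYPE_DELETED < TYPE_CANDIDATE < TYPE_UNKNOWN < TYPE_GOOD (an assumption about this codebase):

// Hypothetical quality ordering for cell.sort(): candidates whose point
// type ranks higher (e.g. TYPE_GOOD) come first, so well-established
// points are tried before unknowns and candidates.
bool Reprojector::pointQualityComparator(Candidate& lhs, Candidate& rhs)
{
  return lhs.pt->type_ > rhs.pt->type_;
}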
Example #10
bool ColorProducer::Initialize(FrameManagerPtr pFrameManager) {
	if(pFrameManager != 0) {
		FramePtr pFrame = pFrameManager->CreateFrame();
		if(pFrame != 0)	{
			memset_d(reinterpret_cast<unsigned long*>(pFrame->GetDataPtr()), colorValue_, pFrame->GetDataSize() / sizeof(unsigned long));
			frameBuffer_.push_back(pFrame);
			return true;
		}
	}
	return false;
}
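memset_d is not a standard function. Given the unsigned long* cast and the element count passed in, a plausible reading is a word-wise fill, sketched here as an assumption rather than the project's actual definition:

#include <cstddef>

// Hypothetical word-wise fill: writes `value` into `count` consecutive
// unsigned longs (std::memset, by contrast, fills individual bytes).
void memset_d(unsigned long* dest, unsigned long value, size_t count)
{
    for (size_t i = 0; i < count; ++i)
        dest[i] = value;
}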
Example #11
void AbstractStream::dataLoop(AbstractStream *stream)
{
    switch (stream->mediaType()) {
    case AVMEDIA_TYPE_VIDEO:
    case AVMEDIA_TYPE_AUDIO:
        while (stream->m_runDataLoop) {
            stream->m_dataMutex.lock();

            if (stream->m_frames.isEmpty())
                stream->m_dataQueueNotEmpty.wait(&stream->m_dataMutex,
                                                 THREAD_WAIT_LIMIT);

            if (!stream->m_frames.isEmpty()) {
                FramePtr frame = stream->m_frames.dequeue();
                stream->processData(frame.data());

                if (stream->m_frames.size() < stream->m_maxData)
                    stream->m_dataQueueNotFull.wakeAll();
            }

            stream->m_dataMutex.unlock();
        }

        break;
    case AVMEDIA_TYPE_SUBTITLE:
        while (stream->m_runDataLoop) {
            stream->m_dataMutex.lock();

            if (stream->m_subtitles.isEmpty())
                stream->m_dataQueueNotEmpty.wait(&stream->m_dataMutex,
                                                 THREAD_WAIT_LIMIT);

            if (!stream->m_subtitles.isEmpty()) {
                SubtitlePtr subtitle = stream->m_subtitles.dequeue();
                stream->processData(subtitle.data());

                if (stream->m_subtitles.size() < stream->m_maxData)
                    stream->m_dataQueueNotFull.wakeAll();
            }

            stream->m_dataMutex.unlock();
        }

        break;
    default:
        break;
    }
}
Example #12
int BufferSrcFilterContext::addFrame(const FramePtr &frame)
{
    assert(isValid());
    assert(frame);

    return av_buffersrc_add_frame(getAVFilterContext(), frame->getAVFrame());
}
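av_buffersrc_add_frame only pushes a frame into the filter graph; the filtered output is pulled from the matching buffersink. The wrapper's sink accessor is not shown in this example, so here is a sketch of the drain side in the raw FFmpeg API (drainSink and its parameters are illustrative):

extern "C" {
#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>
}

// Hypothetical drain loop for the sink end of the graph fed above.
// av_buffersink_get_frame() returns >= 0 per frame, AVERROR(EAGAIN)
// when more input is needed, or AVERROR_EOF at end of stream.
int drainSink(AVFilterContext* sinkCtx, AVFrame* filtered)
{
    int ret;
    while ((ret = av_buffersink_get_frame(sinkCtx, filtered)) >= 0)
    {
        // ... consume `filtered` here ...
        av_frame_unref(filtered);
    }
    return ret;
}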
Example #13
void DepthFilter::initializeSeeds(FramePtr frame)
{

  list<PointFeat*> new_pt_features;
  list<LineFeat*> new_seg_features;

  if(Config::hasPoints())
  {
      /* detect new point features in point-unpopulated cells of the grid */
      // populate the occupancy grid of the detector with current features
      pt_feature_detector_->setExistingFeatures(frame->pt_fts_);
      // detect features to fill the free cells in the image
      pt_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                Config::triangMinCornerScore(), new_pt_features);
  }

  if(Config::hasLines())
  {
      /* detect new segment features in line-unpopulated cells of the grid */
      // populate the occupancy grid of the detector with current features
      seg_feature_detector_->setExistingFeatures(frame->seg_fts_);
      // detect features
      seg_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                Config::lsdMinLength(), new_seg_features);
  }

  // halt the parallel seed-updating thread while new seeds are added
  seeds_updating_halt_ = true;
  lock_t lock(seeds_mut_); // taking the lock makes updateSeeds() pause
  // increase by one the keyframe counter (to account for this new one)
  ++PointSeed::batch_counter;

  // initialize a point seed for every new point feature
  std::for_each(new_pt_features.begin(), new_pt_features.end(), [&](PointFeat* ftr){
    pt_seeds_.push_back(PointSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });

  // initialize a segment seed for every new segment feature
  std::for_each(new_seg_features.begin(), new_seg_features.end(), [&](LineFeat* ftr){
    seg_seeds_.push_back(LineSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });

  if(options_.verbose)
    SVO_INFO_STREAM("DepthFilter: Initialized "<<new_pt_features.size()<<" new point seeds and "
                    <<new_seg_features.size()<<" line segment seeds.");
  seeds_updating_halt_ = false;
}
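Each seed above is constructed from the new keyframe's mean and minimum scene depth. In SVO-style depth filters the constructor typically seeds an inverse-depth Gaussian plus Beta-distributed inlier odds; a sketch of what this PointSeed constructor plausibly does, with field names borrowed from the original SVO Seed (an assumption about this variant):

// Hypothetical PointSeed constructor in the style of SVO's Seed: an
// inverse-depth Gaussian (mu, sigma2) spanning [0, 1/depth_min], plus
// Beta(a, b) parameters modelling the inlier probability.
PointSeed::PointSeed(PointFeat* ftr, float depth_mean, float depth_min) :
    batch_id(batch_counter),
    ftr(ftr),
    a(10),
    b(10),
    mu(1.0f / depth_mean),
    z_range(1.0f / depth_min),
    sigma2(z_range * z_range / 36.0f)
{}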
Example #14
	bool decodeFrame(StreamFrameMap& streamFrames)
	{
		bool done = false;

		while(!done)
		{
			// demux
			PacketPtr packet = demuxPacket();
			if(packet == 0){
				FlogE("demuxing failed");
				return false;
			}

			// decode
			decodePacket(packet, streamFrames);

			// check if any frames finished
			for(auto pair : streamFrames){
				FramePtr frame = pair.second;
				if(frame->finished != 0){
					// set timestamp and break out of loop
					int64_t pts = av_frame_get_best_effort_timestamp(frame->GetAvFrame());

					if(pair.first == videoStream){
						if(firstDts == AV_NOPTS_VALUE){
							firstDts = frame->GetAvFrame()->pkt_dts;
							FlogD("setting firstDts to: " << firstDts);
						}

						if(firstPts == AV_NOPTS_VALUE){
							firstPts = pts;
							FlogD("setting firstPts to: " << firstPts);
						}
					}
					
					frame->SetPts(pts);
					done = true;
				}
			}
		}
		
		// successfully decoded frame, reset retry counter
		ResetRetries();

		return true;
	}
Example #15
const QIcon * StaticFrameGui::icon()
{
    QPixmap p(48,48);
    p.fill(Qt::black); // QPixmap contents start uninitialized; clear before painting
    if (fp) {
        FramePtr f = fp->frame();
        if (f) {
            QPainter q(&p);
            q.setBackground (Qt::black);
            q.setRenderHint(QPainter::HighQualityAntialiasing);
            q.setBrush(QBrush(Qt::black));
            q.drawRect(0,0,48,48);
            f->render (q,0,0,48,48);
        }
    }
    QIcon * i = new QIcon(p);
    return i;
}
Example #16
bool TargaProducer::Initialize(FrameManagerPtr pFrameManager) {
	if(pFrameManager != 0) {
		FramePtr pFrame = pFrameManager->CreateFrame();
		if(pFrame != 0 && pFrame->GetDataPtr() != 0) {
			PixmapDataPtr pResult = TargaManager::CropPadToFrameFormat(pImage_, pFrameManager->GetFrameFormatDescription());

			unsigned char* pFrameData = pFrame->GetDataPtr();
			unsigned char* pImageData = pResult->GetDataPtr();

			memcpy(pFrameData, pImageData, pFrame->GetDataSize());

			frameBuffer_.push_back(pFrame);
			return true;
		}
	}
	return false;
}
Example #17
//
// This event handler is triggered through a MFC message posted by the frame observer
//
// Parameters:
//  [in]    status          The frame receive status (complete, incomplete, ...)
//  [in]    lParam          [Unused, demanded by MFC signature]
//
// Returns:
//  Nothing, always returns 0
//
LRESULT CAsynchronousGrabDlg::OnFrameReady( WPARAM status, LPARAM lParam )
{
    if( true == m_bIsStreaming )
    {
        // Pick up frame
        FramePtr pFrame = m_ApiController.GetFrame();
        if( SP_ISNULL( pFrame) )
        {
            Log( _TEXT("frame ptr is NULL, late call") );
            return 0;
        }
        // See if it is not corrupt
        if( VmbFrameStatusComplete == status )
        {
            VmbUchar_t *pBuffer;
            VmbErrorType err = pFrame->GetImage( pBuffer );
            if( VmbErrorSuccess == err )
            {
                VmbUint32_t nSize;
                err = pFrame->GetImageSize( nSize );
                if( VmbErrorSuccess == err )
                {
                    VmbPixelFormatType ePixelFormat = m_ApiController.GetPixelFormat();
                    CopyToImage( pBuffer,ePixelFormat, m_Image );
                    // Display it
                    RECT rect;
                    m_PictureBoxStream.GetWindowRect( &rect );
                    ScreenToClient( &rect );
                    InvalidateRect( &rect, false );
                }
            }
        }
        else
        {
            // If we receive an incomplete image we do nothing but logging
            Log( _TEXT( "Failure in receiving image" ), VmbErrorOther );
        }

        // And queue it to continue streaming
        m_ApiController.QueueFrame( pFrame );
    }

    return 0;
}
Example #18
	bool update()
	{
		tick();
		adjustTime();
		FramePtr newFrame = fetchFrame();

		if(newFrame != 0){
			// Only replace currentFrame when newFrame wraps a different AVFrame
			if(currentFrame == 0 || currentFrame->GetAvFrame() != newFrame->GetAvFrame()){
				currentFrame = newFrame; // Save the current frame for snapshots etc.
			}

			lastFrameQueuePts = timeFromTs(newFrame->GetPts());
			return true;
		}

		return false;
	}
Example #19
void DepthFilter::getSeedsCopy(const FramePtr& frame, std::list<Seed>& seeds)
{
  lock_t lock(seeds_mut_);
  for(std::list<Seed>::iterator it=seeds_.begin(); it!=seeds_.end(); ++it)
  {
    if (it->ftr->frame == frame.get())
      seeds.push_back(*it);
  }
}
Example #20
	/// Detect FAST corners; outputs the corresponding points and their direction vectors (which can be viewed as the points' back-projected coordinates)
	void Initialization::detectFeatures(
		FramePtr frame,
		std::vector<cv::Point2f>& px_vec,
		std::vector<Vector3d>& f_vec)
	{
		Features new_features;
		FastDetector detector(
			frame->img().cols, frame->img().rows, 25, 3);
		detector.detect(frame.get(), frame->img_pyr_, 20.0, new_features);

		// return the feature positions and the features' unit bearing vectors
		px_vec.clear(); px_vec.reserve(new_features.size());
		f_vec.clear(); f_vec.reserve(new_features.size());
		std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
			px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
			f_vec.push_back(ftr->f);
			delete ftr;
		});
	}
Example #21
bool PointCloudFrameGenerator::generate(const FramePtr &in, FramePtr &out)
{
  const DepthFrame *depthFrame = dynamic_cast<const DepthFrame *>(in.get());
  
  if(!depthFrame)
  {
    logger(LOG_ERROR) << "PointCloudFrameGenerator: Only DepthFrame type is supported as input to generate() function." << std::endl;
    return false;
  }
  
  XYZIPointCloudFrame *f = dynamic_cast<XYZIPointCloudFrame *>(out.get());
  
  if(!f)
  {
    f = new XYZIPointCloudFrame();
    out = FramePtr(f);
  }
  
  f->id = depthFrame->id;
  f->timestamp = depthFrame->timestamp;
  f->points.resize(depthFrame->size.width*depthFrame->size.height);
  
  if(!_pointCloudTransform->depthToPointCloud(depthFrame->depth, *f))
  {
    logger(LOG_ERROR) << "DepthCamera: Could not convert depth frame to point cloud frame" << std::endl;
    return false;
  }
  
  // Setting amplitude as intensity
  auto index = 0;
  
  auto w = depthFrame->size.width;
  auto h = depthFrame->size.height;
  
  for(auto y = 0; y < h; y++)
    for(auto x = 0; x < w; x++, index++)
    {
      IntensityPoint &p = f->points[index];
      p.i = depthFrame->amplitude[index];
    }
    
  return true;
}
Example #22
void detectFeatures(
    FramePtr frame,
    vector<cv::Point2f>& px_vec,
    vector<Vector3d>& f_vec)
{
  Features new_features;
  feature_detection::FastDetector detector(
      frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels());
  detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features);

  // now for all maximum corners, initialize a new seed
  px_vec.clear(); px_vec.reserve(new_features.size());
  f_vec.clear(); f_vec.reserve(new_features.size());
  std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
    px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
    f_vec.push_back(ftr->f);
    delete ftr;
  });
}
Example #23
	void decodePacket(PacketPtr packet, StreamFrameMap& streamFrames)
	{
		int bytesRemaining = packet->avPacket.size;

		// Decode until all bytes in the read frame is decoded
		while(bytesRemaining > 0)
		{
			int bytesDecoded = 0;
			int idx = packet->avPacket.stream_index;
			auto it = streamFrames.find(idx);

			if(it != streamFrames.end()){
				FramePtr frame = it->second;

				switch(pFormatCtx->streams[idx]->codec->codec_type){
					case AVMEDIA_TYPE_VIDEO:
						if( (bytesDecoded = avcodec_decode_video2(pCodecCtx, frame->GetAvFrame(), &frame->finished, &packet->avPacket)) <= 0 ){
							Retry(Str("avcodec_decode_video2() failed in decodePacket, returned: " << bytesDecoded));
						}
						frame->hasVideo = true;
						break;

					case AVMEDIA_TYPE_AUDIO:
						if((bytesDecoded = audioHandler->decode(packet->avPacket, pFormatCtx->streams[audioStream], frame, frame->finished)) <= 0){
							Retry(Str("audio decoder failed in decodePacket, returned: " << bytesDecoded));
						}
						frame->hasAudio = true;
						break;

					default:
						break;
				}
			}else{
				// stream not handled
				return;
			}

			bytesRemaining -= bytesDecoded;

			if(bytesRemaining > 0)
				Retry(Str("decodePacket not finished, bytesRemaining: " << bytesRemaining << ", bytesDecoded: " << bytesDecoded));
		}
	}
Example #24
		void DisplayFrame(Frame* pFrame)
		{
			if(pFrame != NULL && pFrame->HasValidDataPtr()) {
				if(GetFrameManager()->Owns(*pFrame)) {
					DoRender(pFrame);
				}
				else {
					FramePtr pTempFrame = GetReservedFrame();
					if(pTempFrame && pFrame->GetDataSize() == pTempFrame->GetDataSize()) {
						utils::image::Copy(pTempFrame->GetDataPtr(), pFrame->GetDataPtr(), pTempFrame->GetDataSize());
						DoRender(pTempFrame.get());
					}
					else
						LOG << TEXT("DECKLINK: Failed to get reserved frame");
				}
			}
			else {
				LOG << TEXT("DECKLINK: Tried to render frame with no data");
			}
		}
Example #25
	FramePtr fetchFrame()
	{
		bool wasStepIntoQueue = stepIntoQueue;

		if(frameQueue.empty() && IsEof() && !reportedEof)
		{
			reportedEof = true;
			messageCallback(MEof, "eof");
		}

		if(stepIntoQueue && !frameQueue.empty())
		{
			stepIntoQueue = false;
			timeHandler->SetTime(timeFromTs(frameQueue.top()->GetPts()) + .001);
			audioHandler->discardQueueUntilTs(timeHandler->GetTime());
		}

		double time = timeHandler->GetTime();

		FramePtr newFrame = 0;

		// Throw away all old frames (timestamp older than now) except for the
		// last one, and set newFrame to point at that.
		int poppedFrames = 0;

		while(!frameQueue.empty() && timeFromTs(frameQueue.top()->GetPts()) < time)
		{
			newFrame = frameQueue.top();
			frameQueue.pop();
			poppedFrames++;
		}
			
		if(poppedFrames > 1){
			FlogD("skipped " << poppedFrames - 1 << " frames");
		}

		if(newFrame != 0 && (newFrame->GetPts() >= time || wasStepIntoQueue))
			return newFrame;

		return 0;
	}
Example #26
void FrameManager::addFrame(const FramePtr &frame)
{
	const std::string &frameName = frame->getName();
	if (_knownFrames.find(frameName) == _knownFrames.end())
	{
		_knownFrames[frameName] = frame;
	}
	else
	{
		ERRORMSG("Trying to add a duplicate frame name.");
	}
}
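A name-keyed registry like _knownFrames usually pairs with a lookup. A sketch of what that accessor might look like (getFrame is hypothetical and not shown in this example):

// Hypothetical lookup counterpart to addFrame(): returns the frame
// registered under `name`, or an empty FramePtr if none exists.
FramePtr FrameManager::getFrame(const std::string &name) const
{
	auto it = _knownFrames.find(name);
	return (it != _knownFrames.end()) ? it->second : FramePtr();
}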
Example #27
bool Reprojector::reprojectPoint(FramePtr frame, Point* point)
{
  Vector2d px(frame->w2c(point->pos_));
  if(frame->cam_->isInFrame(px.cast<int>(), 8)) // 8px is the patch size in the matcher
  {
    const int k = static_cast<int>(px[1]/grid_.cell_size)*grid_.grid_n_cols
                + static_cast<int>(px[0]/grid_.cell_size);
    grid_.cells.at(k)->push_back(Candidate(point, px));
    return true;
  }
  return false;
}
Example #28
void Builder::showFrame(DisplayFlag const& ds_flag, StabilizerPtr const& stabilizer, cv::Mat & img){

	FramePtr frame;
	if(ds_flag == DisplayFlags::ORIGINAL_WND){
		frame = stabilizer->getFrameBuf()->getNext();

		//frame->mat().copySize(img);
		frame->mat().copyTo(img);

	}else if(ds_flag == DisplayFlags::GRAY_WND){
		frame = stabilizer->getFrameBuf()->getNext();

		//frame->matGray().copySize(img);
		frame->matGray().copyTo(img);
	} else if(ds_flag == DisplayFlags::STABILIZED_WND){
		frame = stabilizer->getStabFrameBuf()->getNext();

		//frame->mat().copySize(img);
		frame->mat().copyTo(img);
	}
}
Example #29
void Visualizer::visualizeMarkers(
    const FramePtr& frame,
    const set<FramePtr>& core_kfs,
    const Map& map,
    bool inited,
    double svo_scale,
    double our_scale)
{
  if((frame == NULL) || !inited)
  {
      // frame may be NULL on this path, so don't dereference it for the stamp
      vk::output_helper::publishTfTransform(
//                  SE3(Matrix3d::Identity(), Vector3d::Zero()),
                  T_world_from_vision_,
                  (frame == NULL) ? ros::Time::now() : ros::Time(frame->timestamp_),
                  "odom", "cam_pos", br_);
      return;
  }

  SE3 temp = (frame->T_f_w_*T_world_from_vision_.inverse()).inverse();
  double scale = our_scale / svo_scale;
  temp.translation() = temp.translation()* scale;

  vk::output_helper::publishTfTransform(
      temp,
      ros::Time(frame->timestamp_), "odom", "cam_pos", br_);

  if(pub_frames_.getNumSubscribers() > 0 || pub_points_.getNumSubscribers() > 0)
  {
    vk::output_helper::publishHexacopterMarker(
        pub_frames_, "cam_pos", "cams", ros::Time(frame->timestamp_),
        1, 0, 0.3, Vector3d(0.,0.,1.));
    vk::output_helper::publishPointMarker(
        pub_points_, T_world_from_vision_*frame->pos(), "trajectory",
        ros::Time::now(), trace_id_, 0, 0.006, Vector3d(0.,0.,0.5));
    if(frame->isKeyframe() || publish_map_every_frame_)
      publishMapRegion(core_kfs);
    removeDeletedPts(map);
  }
}
Example #30
void Builder::showOpticalFlow(FramePtr const& frame, cv::Scalar const& color, cv::Mat & img){
	cv::Point2f pt;
	VideoLib::Frame::Features& features = frame->features();
	for(int i = 0; i < img.rows; i+=10){
		for(int j = 0; j<img.cols; j+=10){
			pt = features.at<cv::Point2f>(i,j);
			if(pt.x != 0 && pt.y !=0){
				cv::line(img,cv::Point2f(i,j),cv::Point2f(i+pt.x,j+pt.y), color,1);
				//cv::circle(img, cv::Point(i,j), 2, color, -1);
			}
		}
	}

}