Example #1
/**
 * @brief Converts the whole video into a single Event containing every frame.
 * @param path Cache directory handed to each created Frame.
 * @return The resulting event, or a null pointer if the video yielded no frames.
 */
EventPtr Video::convertToEvent(std::string path){
	cv::Mat shot;
	FramePtr frame;
	EventPtr event;

	unsigned int j=0;
	int framecount=0;

	double tmpPos = getFramePos();
	setFramePos(0);

	emit startProgress(0, (uint) getLengthFrames());

	while(getNextFrame(shot)){
		emit progressChanged(j);

		if (event.isNull()){
			event = EventPtr(new Event(this));
		}
		// create new frame
		frame = FramePtr(new Frame(this, shot, path));
		// add frame to event
		event->addFrame(frame);
		framecount ++;
		j++;
	}

	setFramePos(tmpPos);

	return event;
}
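For context, a minimal call-site sketch (assuming a Video instance named video that has already been opened; getCacheDir() is used as the cache path, as in getCurrentFrameRef() further below):

// Assumption: video is an opened Video object
EventPtr wholeClip = video.convertToEvent(video.getCacheDir());
if (!wholeClip.isNull()){
	// hand the single event covering the whole clip to the rest of the application
}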
/**
* @brief Returns the most recently captured frame, provided the provider is ready and pollable.
*
* @return The last frame, or a null FramePtr if the frame cannot be polled.
*/
FramePtr FeedProvider::pollFrame() const
{
    if ( !ready() || !mCanPoll )
        return( FramePtr( NULL ) );
    mFrameMutex.lock();
    FramePtr frame( mLastFrame );
    mFrameMutex.unlock();
    return( frame );
}
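A consumer-side sketch (assumption: provider is a pointer to a live FeedProvider set up elsewhere):

FramePtr latest = provider->pollFrame();
if ( latest.get() )
{
    // use the frame; a null FramePtr means the provider was not ready or cannot be polled
}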
void FrameManager::addFrame(const std::string &frameName, int priority)
{
    if (_knownFrames.find(frameName) == _knownFrames.end())
    {
        _knownFrames[frameName] = FramePtr(new Frame(frameName, priority));
    }
    else
    {
        ERRORMSG("Trying to add a duplicate frame name.");
    }
}
Example #4
void AbstractStream::dataEnqueue(AVFrame *frame)
{
    this->m_dataMutex.lock();

    if (this->m_frames.size() >= this->m_maxData)
        this->m_dataQueueNotFull.wait(&this->m_dataMutex);

    this->m_frames.enqueue(FramePtr(frame, this->deleteFrame));
    this->m_dataQueueNotEmpty.wakeAll();
    this->m_dataMutex.unlock();
}
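A hypothetical dequeue counterpart, sketched with the same Qt primitives (QMutex and QWaitCondition) the enqueue side above uses; the method name and queue semantics are assumptions:

FramePtr AbstractStream::dataDequeue()
{
    // Hypothetical counterpart to dataEnqueue() above: block until a frame is available,
    // take it from the queue and wake any producer waiting for free space.
    this->m_dataMutex.lock();

    if (this->m_frames.isEmpty())
        this->m_dataQueueNotEmpty.wait(&this->m_dataMutex);

    FramePtr frame = this->m_frames.dequeue();
    this->m_dataQueueNotFull.wakeAll();
    this->m_dataMutex.unlock();

    return frame;
}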
Example #5
  void FrameGrabber::run() {
    for (;;) {
      // Stop when requested.
      if (isInterruptionRequested()) {
        return;
      }

      QImage img = session->getSnapshot(false);
      emit frameCaptured(FramePtr(new QImage(img)));
      usleep(512);
    }
  }
/**
* @brief Filter thread body: swaps the U and V planes of each queued YUV420P frame and redistributes the result.
*
* @return !ended()
*/
int FilterSwapUV::run()
{
    if ( waitForProviders() )
    {
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat inputPixelFormat = videoProvider()->pixelFormat();
        ByteBuffer tempBuffer;
        int yChannelSize = inputWidth*inputHeight;
        int uvChannelSize = yChannelSize/4;

        if ( inputPixelFormat != PIX_FMT_YUV420P )
            Fatal( "Can't swap UV for pixel format %d", inputPixelFormat );

        while ( !mStop )
        {
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                Debug( 3, "Got %zd frames on queue", mFrameQueue.size() );
                for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                {
                    //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get());
                    //FramePtr framePtr( *iter );
                    const FeedFrame *frame = (*iter).get();

                    Debug(1, "%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() );

                    //Image image( inputPixelFormat, inputWidth, inputHeight, frame->buffer().data() );

                    tempBuffer.size( frame->buffer().size() );
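                    // YUV420P layout: a full-size Y plane followed by U and V planes of a quarter
                    // the size each. Copy Y unchanged, then write each chroma plane into the
                    // other's position to swap U and V.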
                    memcpy( tempBuffer.data(), frame->buffer().data(), yChannelSize );
                    memcpy( tempBuffer.data()+yChannelSize, frame->buffer().data()+yChannelSize+uvChannelSize, uvChannelSize);
                    memcpy( tempBuffer.data()+yChannelSize+uvChannelSize, frame->buffer().data()+yChannelSize, uvChannelSize);
                    VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), tempBuffer );
                    distributeFrame( FramePtr( videoFrame ) );

                    //delete *iter;
                    mFrameCount++;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();
            checkProviders();
            // Quite short so we can always keep up with the required packet rate for 25/30 fps
            usleep( INTERFRAME_TIMEOUT );
        }
    }
    FeedProvider::cleanup();
    FeedConsumer::cleanup();
    return( !ended() );
}
/**
* @brief Registers a consumer with the encoder and, if the initial (stream header) frame is already available, queues it to the new consumer.
*
* @param consumer The consumer to attach.
* @param link The feed link describing the connection.
*
* @return true if the consumer was registered, false otherwise.
*/
bool H264Encoder::registerConsumer( FeedConsumer &consumer, const FeedLink &link )
{
    if ( Encoder::registerConsumer( consumer, link ) )
    {
        if ( !mInitialFrame.empty() )
        {
            //VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, mInitialFrame );
            VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, 0, mInitialFrame );
            consumer.queueFrame( FramePtr( outputVideoFrame ), this );
        }
        return( true );
    }
    return( false );
}
/**
* @brief Filter thread body: stamps a timestamp onto each queued frame and redistributes it; frames that cannot be stamped are passed through unchanged.
*
* @return !ended()
*/
int ImageTimestamper::run()
{
    if ( waitForProviders() )
    {
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat inputPixelFormat = videoProvider()->pixelFormat();

        while ( !mStop )
        {
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                Debug( 3, "Got %zd frames on queue", mFrameQueue.size() );
                for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                {
                    //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get());
                    //FramePtr framePtr( *iter );
                    const FeedFrame *frame = (*iter).get();

                    Debug(1, "%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() );

                    Image image( inputPixelFormat, inputWidth, inputHeight, frame->buffer().data() );

                    if ( timestampImage( &image, frame->timestamp() ) )
                    {
                        VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), image.buffer() );
                        distributeFrame( FramePtr( videoFrame ) );
                    }
                    else
                    {
                        distributeFrame( *iter );
                    }

                    //delete *iter;
                    mFrameCount++;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();
            checkProviders();
            // Quite short so we can always keep up with the required packet rate for 25/30 fps
            usleep( INTERFRAME_TIMEOUT );
        }
    }
    FeedProvider::cleanup();
    FeedConsumer::cleanup();
    return( !ended() );
}
/**
* @brief Input thread body: attaches to the legacy shared-memory area and distributes a frame each time the writer index advances.
*
* @return !ended()
*/
int MemoryInputV1::run()
{
    SharedData sharedData;
    memset( &sharedData, 0, sizeof(sharedData) );

    while( !mStop )
    {
        Info( "Querying memory" );
        if ( queryMemory( &sharedData ) && sharedData.valid )
            break;
        Info( "Can't query shared memory" );
        usleep( 500000 );
    }
    //Info( "SHV: %d", sharedData.valid );
    //mImageCount = 40;
    //mPixelFormat = sharedData.imageFormat;
    //mPixelFormat = PIX_FMT_UYVY422;
    //mPixelFormat = PIX_FMT_YUYV422;
    //mPixelFormat = PIX_FMT_RGB24;
    //mFrameRate = 15;
    ////mImageWidth = sharedData.imageWidth;
    //mImageWidth = 720;
    ////mImageHeight = sharedData.imageHeight;
    //mImageHeight = 576;

    attachMemory( mImageCount, mPixelFormat, mImageWidth, mImageHeight );
    int lastWriteIndex = 0;
    while( !mStop )
    {
        if ( !mSharedData || !mSharedData->valid )
        {
            stop();
            break;
        }
        if ( mSharedData->last_write_index != lastWriteIndex )
        {
            const FeedFrame *frame = loadFrame();
            //Info( "Sending frame %d", frame->id() );
            lastWriteIndex = mSharedData->last_write_index;
            distributeFrame( FramePtr( frame ) );
            //delete frame;
            mFrameCount++;
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();
    return( !ended() );
}
bool FrameStreamReader::readNext()
{
  if(_currentFrameIndex >= size())
    return false;
  
  size_t nextIndex = _dataPacketLocation[_currentFrameIndex];
  
  if(nextIndex > _currentPacketIndex) // some non-data packets in between?
  {
    for(; _currentPacketIndex < nextIndex; _currentPacketIndex++)
    {
      _readConfigPacket(_currentPacketIndex);
    }
  }
  
  RawDataFrame *r = dynamic_cast<RawDataFrame *>(frames[0].get());
  if(!r)
  {
    r = new RawDataFrame();
    frames[0] = FramePtr(r);
  }
  
  if(!_getPacket(_currentPacketIndex, _dataPacket))
  {
    logger(LOG_ERROR) << "FrameStreamReader: Failed to get data packet at index = " << _currentFrameIndex << std::endl;
    return false;
  }
  
  _currentFrameIndex++;
  _currentPacketIndex++;
  
  if(!r->deserialize(_dataPacket.object))
  {
    logger(LOG_ERROR) << "FrameStreamReader: Failed to deserialize data packet at index = " << _currentFrameIndex << std::endl;
    return false;
  }
  
  if(!_frameGenerator[0]->generate(frames[0], frames[1]) ||
    !_frameGenerator[1]->generate(frames[1], frames[2]) ||
    !_frameGenerator[2]->generate(frames[2], frames[3]))
  {
    logger(LOG_ERROR) << "FrameStreamReader: Failed to process and generate subsequent frame types at index = " << _currentFrameIndex << std::endl;
    return false;
  }
    
  return true;
}
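A minimal read-loop sketch (assumptions: reader is an already-opened FrameStreamReader and frames[3] holds the final generated frame type, as the generator chain above suggests):

while (reader.readNext())
{
  FramePtr processed = reader.frames[3]; // assumed to be the last stage of the generator chain
  // consume the fully processed frame here
}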
bool PointCloudFrameGenerator::generate(const FramePtr &in, FramePtr &out)
{
  const DepthFrame *depthFrame = dynamic_cast<const DepthFrame *>(in.get());
  
  if(!depthFrame)
  {
    logger(LOG_ERROR) << "PointCloudFrameGenerator: Only DepthFrame type is supported as input to generate() function." << std::endl;
    return false;
  }
  
  XYZIPointCloudFrame *f = dynamic_cast<XYZIPointCloudFrame *>(out.get());
  
  if(!f)
  {
    f = new XYZIPointCloudFrame();
    out = FramePtr(f);
  }
  
  f->id = depthFrame->id;
  f->timestamp = depthFrame->timestamp;
  f->points.resize(depthFrame->size.width*depthFrame->size.height);
  
  if(!_pointCloudTransform->depthToPointCloud(depthFrame->depth, *f))
  {
    logger(LOG_ERROR) << "DepthCamera: Could not convert depth frame to point cloud frame" << std::endl;
    return false;
  }
  
  // Setting amplitude as intensity
  auto index = 0;
  
  auto w = depthFrame->size.width;
  auto h = depthFrame->size.height;
  
  for(auto y = 0; y < h; y++)
    for(auto x = 0; x < w; x++, index++)
    {
      IntensityPoint &p = f->points[index];
      p.i = depthFrame->amplitude[index];
    }
    
  return true;
}
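A minimal usage sketch (assumptions: gen is an already-configured PointCloudFrameGenerator and depthIn is a FramePtr holding a DepthFrame):

FramePtr cloudOut;
if (gen.generate(depthIn, cloudOut))
{
  // cloudOut now holds an XYZIPointCloudFrame with intensities copied from the amplitude image
}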
Example #12
FramePtr StaticFrame::nextFrame(PlaybackImplPtr pb)
{
    assert (pb);
    StaticFramePlaybackPtr sp = boost::shared_dynamic_cast<StaticFramePlayback>(pb);
    assert (sp);
    if (useDewell) {
        // Dewell timer active
        if (sp->dewellStart.elapsed() > (int)dewell) {
            sp->dewellStart.restart();
            sp->active = false;
        }
    } else {
        if (sp->repeatsDone++ >= repeats) {
            sp->active = false;
        }
    }
    if (sp->active) {
        return frame();
    } else {
        sp->active = true;
        sp->repeatsDone = 0;
    }
    return FramePtr();
}
/**
* @brief Encoder thread body: converts queued frames to the target size and pixel format, encodes them as H.264 with libx264 and distributes the encoded frames.
*
* @return 
*/
int H264Encoder::run()
{
    // TODO - This section needs to be rewritten to read the configuration from the values saved
    // for the streams via the web gui
    AVDictionary *opts = NULL;
    //avSetH264Preset( &opts, "default" );
    //avSetH264Profile( &opts, "main" );
    //avDictSet( &opts, "level", "4.1" );
    avSetH264Preset( &opts, "ultrafast" );
    //avSetH264Profile( &opts, "baseline" );
    avDictSet( &opts, "level", "31" );
    avDictSet( &opts, "g", "24" );
    //avDictSet( &opts, "b", (int)mBitRate );
    //avDictSet( &opts, "bitrate", (int)mBitRate );
    //avDictSet( &opts, "crf", "24" );
    //avDictSet( &opts, "framerate", (double)mFrameRate );
    //avDictSet( &opts, "fps", (double)mFrameRate );
    //avDictSet( &opts, "r", (double)mFrameRate );
    //avDictSet( &opts, "timebase", "1/90000" );
    avDumpDict( opts );

    // Make sure ffmpeg is compiled with libx264 support
    AVCodec *codec = avcodec_find_encoder( CODEC_ID_H264 );
    if ( !codec )
        Fatal( "Can't find encoder codec" );

    mCodecContext = avcodec_alloc_context3( codec );

    mCodecContext->width = mWidth;
    mCodecContext->height = mHeight;
    //mCodecContext->time_base = TimeBase( 1, 90000 );
    mCodecContext->time_base = mFrameRate.timeBase();
    mCodecContext->bit_rate = mBitRate;
    mCodecContext->pix_fmt = mPixelFormat;

    mCodecContext->gop_size = 24;
    //mCodecContext->max_b_frames = 1;

    Debug( 2, "Time base = %d/%d", mCodecContext->time_base.num, mCodecContext->time_base.den );
    Debug( 2, "Pix fmt = %d", mCodecContext->pix_fmt );

    /* open it */
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open encoder codec" );

    avDumpDict( opts );
    AVFrame *inputFrame = avcodec_alloc_frame();

    Info( "%s:Waiting", cidentity() );
    if ( waitForProviders() )
    {
        Info( "%s:Waited", cidentity() );

        // Find the source codec context
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat inputPixelFormat = videoProvider()->pixelFormat();
        //FrameRate inputFrameRate = videoProvider()->frameRate();
        //Info( "CONVERT: %d-%dx%d -> %d-%dx%d",
            //inputPixelFormat, inputWidth, inputHeight,
            //mPixelFormat, mWidth, mHeight
        //);

        // Make space for anything that is going to be output
        AVFrame *outputFrame = avcodec_alloc_frame();
        ByteBuffer outputBuffer;
        outputBuffer.size( avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height ) );
        avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );

        // Prepare for image format and size conversions
        struct SwsContext *convertContext = sws_getContext( inputWidth, inputHeight, inputPixelFormat, mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
        if ( !convertContext )
            Fatal( "Unable to create conversion context for encoder" );

        int outSize = 0;
        uint64_t timeInterval = mFrameRate.intervalUsec();
        uint64_t currTime = time64();
        uint64_t nextTime = currTime;
        //outputFrame->pts = currTime;
        outputFrame->pts = 0;
        uint32_t ptsInterval = 90000/mFrameRate.toInt();
        //uint32_t ptsInterval = mFrameRate.intervalPTS( mCodecContext->time_base );
        while ( !mStop )
        {
            // Synchronise the output with the desired output frame rate
            while ( currTime < nextTime )
            {
                currTime = time64();
                usleep( 1000 );
            }
            nextTime += timeInterval;

            FramePtr framePtr;
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                if ( mInitialFrame.empty() || !mConsumers.empty() )
                {
                    FrameQueue::iterator iter = mFrameQueue.begin();
                    framePtr = *iter;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();

            if ( framePtr.get() )
            {
                const FeedFrame *frame = framePtr.get();
                const VideoFrame *inputVideoFrame = dynamic_cast<const VideoFrame *>(frame);

                //Info( "Provider: %s, Source: %s, Frame: %d", inputVideoFrame->provider()->cidentity(), inputVideoFrame->originator()->cidentity(), inputVideoFrame->id() );
                //Info( "PF:%d @ %dx%d", inputVideoFrame->pixelFormat(), inputVideoFrame->width(), inputVideoFrame->height() );

                avpicture_fill( (AVPicture *)inputFrame, inputVideoFrame->buffer().data(), inputPixelFormat, inputWidth, inputHeight );

                //outputFrame->pts = currTime;
                //Debug( 5, "PTS %jd", outputFrame->pts );
       
                // Reformat the input frame to fit the desired output format
                //Info( "SCALE: %d -> %d", int(inputFrame->data[0])%16, int(outputFrame->data[0])%16 );
                if ( sws_scale( convertContext, inputFrame->data, inputFrame->linesize, 0, inputHeight, outputFrame->data, outputFrame->linesize ) < 0 )
                    Fatal( "Unable to convert input frame (%d@%dx%d) to output frame (%d@%dx%d) at frame %ju", inputPixelFormat, inputWidth, inputHeight, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, mFrameCount );

                // Encode the image
                outSize = avcodec_encode_video( mCodecContext, outputBuffer.data(), outputBuffer.capacity(), outputFrame );
                Debug( 5, "Encoding reports %d bytes", outSize );
                if ( outSize > 0 )
                {
                    //Info( "CPTS: %jd", mCodecContext->coded_frame->pts );
                    outputBuffer.size( outSize );
                    //Debug( 5, "PTS2 %jd", mCodecContext->coded_frame->pts );
                    if ( mInitialFrame.empty() )
                    {
                        Debug( 3, "Looking for H.264 stream info" );
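                        // Walk the encoded buffer start code by start code, classify each NAL unit,
                        // and cache the SEI/SPS/PPS so the stream can later be described to new
                        // consumers; the AVC profile and level are read out of the SPS payload.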
                        const uint8_t *startPos = outputBuffer.head();
                        startPos = h264StartCode( startPos, outputBuffer.tail() );
                        while ( startPos < outputBuffer.tail() )
                        {
                            while( !*(startPos++) )
                                ;
                            const uint8_t *nextStartPos = h264StartCode( startPos, outputBuffer.tail() );

                            int frameSize = nextStartPos-startPos;

                            unsigned char type = startPos[0] & 0x1F;
                            unsigned char nri = startPos[0] & 0x60;
                            Debug( 1, "Type %d, NRI %d (%02x)", type, nri>>5, startPos[0] );

                            if ( type == NAL_SEI )
                            {
                                // SEI
                                mSei.assign( startPos, frameSize );
                            }
                            else if ( type == NAL_SPS )
                            {
                                // SPS
                                Hexdump( 2, startPos, frameSize );
                                mSps.assign( startPos, frameSize );

                                if ( frameSize < 4 )
                                    Panic( "H.264 NAL type 7 frame too short (%d bytes) to extract level/profile", frameSize );
                                mAvcLevel = startPos[3];
                                mAvcProfile = startPos[1];
                                Debug( 2, "Got AVC level %d, profile %d", mAvcLevel, mAvcProfile );
                            }
                            else if ( type == NAL_PPS )
                            {
                                // PPS
                                Hexdump( 2, startPos, frameSize );
                                mPps.assign( startPos, frameSize );
                            }
                            startPos = nextStartPos;
                        }
                        mInitialFrame = outputBuffer;
                        //VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, mInitialFrame );
                    }
                    else
                    {
                        //av_rescale_q(cocontext->coded_frame->pts, cocontext->time_base, videostm->time_base); 
                        VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, outputBuffer );
                        distributeFrame( FramePtr( outputVideoFrame ) );
                    }
                }
                outputFrame->pts += ptsInterval;   ///< FIXME - This can't be right, but it works...
            }
Example #14
/**
 * @brief Automatically splits the video into several events according to the
 * given parameters.
 * @param threshold Percentage of changed pixels above which a frame counts as motion.
 * @param maxcount Maximum number of consecutive motionless frames before an event is closed.
 * @param mincount Minimum number of motion frames an event needs in order to be kept.
 * @param history Background subtractor history length.
 * @param varThreshold Background subtractor variance threshold.
 * @param bShadowDetection Whether the background subtractor should detect shadows.
 * @param path Cache directory handed to each created Frame and Snapshot.
 * @return The detected events; empty if the operation was cancelled or nothing was found.
 */
std::deque<EventPtr> Video::autoDetectEvents(double threshold,
										   double maxcount,
										   double mincount,
										   int history,
										   int varThreshold,
										   bool bShadowDetection,
										   std::string path){
	cv::Mat shot;
	FramePtr frame;
	SnapshotPtr snap;
	EventPtr event;
	std::deque<EventPtr> events;

	unsigned int j=0;
	int emptycount=0;
	int framecount=0;
	int value;
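	// Convert the percentage threshold into an absolute number of changed pixels for this resolution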
	int absoluteThreshold = threshold/100*resolution.width*resolution.height;
	int i;


	// Initialization of background subtraction
	bgSubInit(history, varThreshold, bShadowDetection);

	setFramePos(0);

	emit startProgress(0, (uint) getLengthFrames());

	while(getNextFrame(shot)){
		QCoreApplication::processEvents();
		if (toCancel){
			events.clear();
			canceled();
			return events;
		}
		bg->NewFrame(shot);
		bg->Denoise();
		emit progressChanged(j);
		value = cv::countNonZero(bg->Foreground());

		// Detected change
		if ( value > absoluteThreshold ){
			if (event.isNull()){
				event = EventPtr(new Event(this));
			}
			// create new frame
			frame = FramePtr(new Frame(this, shot, path));
			snap = SnapshotPtr(new Snapshot(frame, bg->Foreground(), path));
			// add frame to event
			event->addFrame(frame);
			event->addSnapshot(snap);
			framecount ++;
			emptycount = 0;
		}
		// Did not detect change
		else if (!event.isNull()){
			emptycount ++;
			// create new frame
			frame = FramePtr(new Frame(this, shot, path));
			snap = SnapshotPtr(new Snapshot(frame, bg->Foreground(), path));
			// add frame to event
			event->addFrame(frame);
			event->addSnapshot(snap);
			framecount ++;
			if(emptycount > maxcount){
				if (framecount - emptycount > mincount){
					// remove extra frames with no movement
					for (i = 0; i < maxcount; i++){
						event->remLastFrame();
						event->remLastSnapshot();
					}
					events.push_back(event);
				}
				event.clear();
				emptycount = 0;
				framecount = 0;
			}
		}
		j++;
	}
	// Check if Video ended in the middle of an Event.
	if (!event.isNull()){
		if (framecount > mincount){
			events.push_back(event);
		} else {
			event.clear();
		}
	}
	return events;
}
Example #15
		FramePtr CreateReservedFrame() {
			return FramePtr(new DecklinkVideoFrame(this));
		}
Example #16
FramePtr Video::getCurrentFrameRef(){
	cv::Mat shot;
	if (getFrame(shot))
		return FramePtr(new Frame(this, shot, getCacheDir()));
	return FramePtr(NULL);
}
/**
* @brief Filter thread body: rescales each queued frame by the configured scale factor using libswscale and redistributes it; frames that need no resizing are passed through unchanged.
*
* @return !ended()
*/
int ImageScale::run()
{
    AVFrame *inputFrame = av_frame_alloc();
    AVFrame *outputFrame = av_frame_alloc();

    if ( waitForProviders() )
    {
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat pixelFormat = videoProvider()->pixelFormat();

        mWidth = inputWidth * mScale;
        mHeight = inputHeight * mScale;

        // Prepare for image format and size conversions
        mScaleContext = sws_getContext( inputWidth, inputHeight, pixelFormat, mWidth, mHeight, pixelFormat, SWS_BILINEAR, NULL, NULL, NULL );
        if ( !mScaleContext )
            Fatal( "Unable to create scale context" );

        Debug( 1,"Scaling from %d x %d -> %d x %d", inputWidth, inputHeight, mWidth, mHeight );
        Debug( 1,"%d bytes -> %d bytes",  avpicture_get_size( pixelFormat, inputWidth, inputHeight ), avpicture_get_size( pixelFormat, mWidth, mHeight ) );

        // Make space for anything that is going to be output
        ByteBuffer outputBuffer;
        outputBuffer.size( avpicture_get_size( pixelFormat, mWidth, mHeight ) );

        // To get offsets only
        avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), pixelFormat, mWidth, mHeight );

        while ( !mStop )
        {
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                Debug( 3, "Got %zd frames on queue", mFrameQueue.size() );
                for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                {
                    //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get());
                    //FramePtr framePtr( *iter );
                    const FeedFrame *frame = (*iter).get();

                    if ( mWidth != inputWidth || mHeight != inputHeight )
                    {
                        // Requires conversion
                        Debug( 1,"%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() );

                        avpicture_fill( (AVPicture *)inputFrame, frame->buffer().data(), pixelFormat, inputWidth, inputHeight );

                        // Reformat the input frame to fit the desired output format
                        if ( sws_scale( mScaleContext, inputFrame->data, inputFrame->linesize, 0, inputHeight, outputFrame->data, outputFrame->linesize ) < 0 )
                            Fatal( "Unable to convert input frame (%dx%d) to output frame (%dx%d) at frame %ju", inputWidth, inputHeight, mWidth, mHeight, mFrameCount );

                        VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), outputBuffer );
                        distributeFrame( FramePtr( videoFrame ) );
                    }
                    else
                    {
                        // Send it out 'as is'
                        distributeFrame( *iter );
                    }
                    //delete *iter;
                    mFrameCount++;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();
            checkProviders();
            // Quite short so we can always keep up with the required packet rate for 25/30 fps
            usleep( INTERFRAME_TIMEOUT );
        }
        FeedProvider::cleanup();
        FeedConsumer::cleanup();
        sws_freeContext( mScaleContext );
        mScaleContext = NULL;
    }
    av_free( outputFrame );
    av_free( inputFrame );
    return( !ended() );
}
/**
* @brief Input thread body: captures frames from a video4linux2 device via libavformat/libavcodec and distributes the decoded frames.
*
* @return !ended()
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
Example #19
void RenderThread::RenderFrames(FrameListPtr sourceFrames, YUV_PLANE plane)
{
	FrameImpl tempFrame;

	for (int i = 0; i < sourceFrames->size(); i++) 
	{
		FramePtr sourceFrameOrig = sourceFrames->at(i);
		if (!sourceFrameOrig)
		{
			continue;
		}
		unsigned int viewID = sourceFrameOrig->Info(VIEW_ID).toUInt();

		Frame* sourceFrame = sourceFrameOrig.data();
		float scaleX = 1;
		float scaleY = 1;
		if (plane != PLANE_COLOR)
		{
			COLOR_FORMAT c = sourceFrameOrig->Format()->Color();
			if (c == I420 || c == I422 || c == I444)
			{
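				// Expose the requested plane of the planar source as a standalone
				// grayscale (Y800) frame so it can be rendered on its own.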
				sourceFrame = &tempFrame;

				FormatPtr format = sourceFrameOrig->Format();
				sourceFrame->Format()->SetColor(Y800);
				sourceFrame->Format()->SetWidth(format->PlaneWidth(plane));
				sourceFrame->Format()->SetHeight(format->PlaneHeight(plane));
				sourceFrame->Format()->SetStride(0, format->Stride(plane));
				sourceFrame->Format()->PlaneSize(0);


				sourceFrame->SetData(0, sourceFrameOrig->Data(plane));

				scaleX = ((float)format->PlaneWidth(plane))/format->Width();
				scaleY = ((float)format->PlaneHeight(plane))/format->Height();
			}
		}
		
		int pos = -1;
		for (int k = 0; k < m_RenderFrames.size(); k++) 
		{
			FramePtr _frame = m_RenderFrames.at(k);
			if (_frame && _frame->Info(VIEW_ID).toUInt() == viewID)
			{
				pos = k;
				break;
			}
		}

		if (pos == -1)
		{
			pos = m_RenderFrames.size();
			m_RenderFrames.append(FramePtr());
		}

		FramePtr& renderFrame = m_RenderFrames[pos];
		// Deallocate if resolution changed
		if (renderFrame && (sourceFrame->Format()->Width() != 
			renderFrame->Format()->Width() || 
			sourceFrame->Format()->Height() != 
			renderFrame->Format()->Height()))
		{
			m_Renderer->Deallocate(renderFrame);
			renderFrame.clear();
		}

		// Allocate if needed
		if (!renderFrame)
		{
			m_Renderer->Allocate(renderFrame, sourceFrame->Format());
		}
		
		renderFrame->SetInfo(VIEW_ID, viewID);
		renderFrame->SetInfo(RENDER_SRC_SCALE_X, scaleX);
		renderFrame->SetInfo(RENDER_SRC_SCALE_Y, scaleY);

		// Render frame
		if (m_Renderer->GetFrame(renderFrame) == OK)
		{
			if (sourceFrame->Format() == renderFrame->Format())
			{
				for (int i=0; i<4; i++)
				{
					size_t len = renderFrame->Format()->PlaneSize(i);
					if (len > 0)
					{
						memcpy(renderFrame->Data(i), sourceFrame->Data(i), len);
					}
				}

			}else
			{
				ColorConversion(*sourceFrame, *renderFrame);
			}

			m_Renderer->ReleaseFrame(renderFrame);
		}
	}
}
Example #20
FramePtr Demodulator::decode(const FrameAudio& frame_audio)
{
    QVector<double> buffer;
    buffer.reserve(frame_audio.count());

    FIRFilter bandpass(FIRFilter::FIR_BANDPASS, settings_.window, SAMPLING_RATE, 220, 900, 2500);
    FIRFilter lowpass(FIRFilter::FIR_LOWPASS, settings_.window, 528000, 12000, SAMPLING_RATE / 2);
    int index = 0;
    for (double value : frame_audio) {
        double low = lowpass.process(value);
        if (index % 40 == 0) {
            buffer.push_back(bandpass.process(low));
        }
        index++;

        for (int i = 0; i < 10; ++i) {
            low = lowpass.process(0);
            if (index % 40 == 0) {
                buffer.push_back(bandpass.process(low));
            }
            index++;
        }
    }

    RingBuffer<double> bufs1200(BITS_PER_BYTE);
    RingBuffer<double> bufc1200(BITS_PER_BYTE);
    RingBuffer<double> bufs2200(BITS_PER_BYTE);
    RingBuffer<double> bufc2200(BITS_PER_BYTE);

    QVector<double> diff_buff;

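    // Correlate the filtered signal against 1200 Hz and 2200 Hz quadrature references;
    // the difference of the two envelopes acts as the FSK discriminator output.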
    for (int i = 0; i < buffer.size(); ++i) {
      bufs1200.push_back(sin(M_PI * 2 / SAMPLING_RATE * 1200.0 * i) * buffer[i]);
      bufc1200.push_back(cos(M_PI * 2 / SAMPLING_RATE * 1200.0 * i) * buffer[i]);
      bufs2200.push_back(sin(M_PI * 2 / SAMPLING_RATE * 2200.0 * i) * buffer[i]);
      bufc2200.push_back(cos(M_PI * 2 / SAMPLING_RATE * 2200.0 * i) * buffer[i]);
      double low = std::hypotf(bufs1200.sum(), bufc1200.sum());
      double high = std::hypotf(bufs2200.sum() , bufc2200.sum());
      diff_buff.push_back(low - high);
    }

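    // Pick candidate slicing thresholds from the sorted discriminator output:
    // the lower quartile, upper quartile and median bound the search below.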
    QVector<double> diff_buff_center = diff_buff;
    qSort(diff_buff_center);
    double min = diff_buff_center[diff_buff_center.size() / 4];
    double max = diff_buff_center[diff_buff_center.size() / 4 + diff_buff_center.size() / 2];
    double center = diff_buff_center[diff_buff_center.size() / 2];

    for (int i = 0; i <= THRESHOLD_RESOLUTION; ++i) {
        FramePtr frame;
        process(settings_.verfy_fcs, center - std::fabs(center - min / THRESHOLD_WIDTH_RATIO) *
                (1.0 * i / THRESHOLD_RESOLUTION), diff_buff, &frame);
        if (frame) {
            return frame;
        }
        process(settings_.verfy_fcs, center + std::fabs(center - max / THRESHOLD_WIDTH_RATIO) *
                (1.0 * i / THRESHOLD_RESOLUTION), diff_buff, &frame);
        if (frame) {
            return frame;
        }
    }

    return FramePtr();
}
Example #21
FramePtr AbstractFrame::data(Playback p)
{
    AbstractFramePlayback *sp = dynamic_cast<AbstractFramePlayback*>(getPlayback(p));

    return FramePtr();
}