/** * @brief * * @return */ int QueuedVideoFilter::run() { if ( waitForProviders() ) { while ( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { Debug( 3, "Got %zd frames on queue", mFrameQueue.size() ); for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { distributeFrame( *iter ); mFrameCount++; } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); // Quite short so we can always keep up with the required packet rate for 25/30 fps usleep( INTERFRAME_TIMEOUT ); } } FeedProvider::cleanup(); FeedConsumer::cleanup(); return( !ended() ); }
/** * @brief * * @return */ int FilterSwapUV::run() { if ( waitForProviders() ) { uint16_t inputWidth = videoProvider()->width(); uint16_t inputHeight = videoProvider()->height(); PixelFormat inputPixelFormat = videoProvider()->pixelFormat(); ByteBuffer tempBuffer; int yChannelSize = inputWidth*inputHeight; int uvChannelSize = yChannelSize/4; if ( inputPixelFormat != PIX_FMT_YUV420P ) Fatal( "Can't swap UV for pixel format %d", inputPixelFormat ); while ( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { Debug( 3, "Got %zd frames on queue", mFrameQueue.size() ); for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get()); //FramePtr framePtr( *iter ); const FeedFrame *frame = (*iter).get(); Debug(1, "%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() ); //Image image( inputPixelFormat, inputWidth, inputHeight, frame->buffer().data() ); tempBuffer.size( frame->buffer().size() ); memcpy( tempBuffer.data(), frame->buffer().data(), yChannelSize ); memcpy( tempBuffer.data()+yChannelSize, frame->buffer().data()+yChannelSize+uvChannelSize, uvChannelSize); memcpy( tempBuffer.data()+yChannelSize+uvChannelSize, frame->buffer().data()+yChannelSize, uvChannelSize); VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), tempBuffer ); distributeFrame( FramePtr( videoFrame ) ); //delete *iter; mFrameCount++; } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); // Quite short so we can always keep up with the required packet rate for 25/30 fps usleep( INTERFRAME_TIMEOUT ); } } FeedProvider::cleanup(); FeedConsumer::cleanup(); return( !ended() ); }
/** * @brief * * @return */ int ImageTimestamper::run() { if ( waitForProviders() ) { uint16_t inputWidth = videoProvider()->width(); uint16_t inputHeight = videoProvider()->height(); PixelFormat inputPixelFormat = videoProvider()->pixelFormat(); while ( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { Debug( 3, "Got %zd frames on queue", mFrameQueue.size() ); for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get()); //FramePtr framePtr( *iter ); const FeedFrame *frame = (*iter).get(); Debug(1, "%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() ); Image image( inputPixelFormat, inputWidth, inputHeight, frame->buffer().data() ); if ( timestampImage( &image, frame->timestamp() ) ) { VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), image.buffer() ); distributeFrame( FramePtr( videoFrame ) ); } else { distributeFrame( *iter ); } //delete *iter; mFrameCount++; } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); // Quite short so we can always keep up with the required packet rate for 25/30 fps usleep( INTERFRAME_TIMEOUT ); } } FeedProvider::cleanup(); FeedConsumer::cleanup(); return( !ended() ); }
/** * @brief * * @return */ int LocalFileDump::run() { std::string filePath; FILE *fileDesc = NULL; if ( waitForProviders() ) { while( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { const FeedFrame *frame = iter->get(); Info( "F:%ld", frame->buffer().size() ); if ( filePath.empty() ) { filePath = stringtf( "%s/%s-%s", mLocation.c_str(), mName.c_str(), frame->provider()->cidentity() ); Info( "Path: %s", filePath.c_str() ); fileDesc = fopen( filePath.c_str(), "w" ); if ( !fileDesc ) Fatal( "Failed to open dump file '%s': %s", filePath.c_str(), strerror(errno) ); } if ( fwrite( frame->buffer().data(), frame->buffer().size(), 1, fileDesc ) <= 0 ) Fatal( "Failed to write to dump file '%s': %s", filePath.c_str(), strerror(errno) ); //delete *iter; } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); usleep( INTERFRAME_TIMEOUT ); } fclose( fileDesc ); } cleanup(); return( 0 ); }
int NotifyOutput::run() { if ( waitForProviders() ) { while( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { processFrame( *iter ); } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); usleep( INTERFRAME_TIMEOUT ); } } cleanup(); return( 0 ); }
/** * @brief * * @return */ int ImageScale::run() { AVFrame *inputFrame = av_frame_alloc(); AVFrame *outputFrame = av_frame_alloc(); if ( waitForProviders() ) { uint16_t inputWidth = videoProvider()->width(); uint16_t inputHeight = videoProvider()->height(); PixelFormat pixelFormat = videoProvider()->pixelFormat(); mWidth = inputWidth * mScale; mHeight = inputHeight * mScale; // Prepare for image format and size conversions mScaleContext = sws_getContext( inputWidth, inputHeight, pixelFormat, mWidth, mHeight, pixelFormat, SWS_BILINEAR, NULL, NULL, NULL ); if ( !mScaleContext ) Fatal( "Unable to create scale context" ); Debug( 1,"Scaling from %d x %d -> %d x %d", inputWidth, inputHeight, mWidth, mHeight ); Debug( 1,"%d bytes -> %d bytes", avpicture_get_size( pixelFormat, inputWidth, inputHeight ), avpicture_get_size( pixelFormat, mWidth, mHeight ) ); // Make space for anything that is going to be output ByteBuffer outputBuffer; outputBuffer.size( avpicture_get_size( pixelFormat, mWidth, mHeight ) ); // To get offsets only avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), pixelFormat, mWidth, mHeight ); while ( !mStop ) { mQueueMutex.lock(); if ( !mFrameQueue.empty() ) { Debug( 3, "Got %zd frames on queue", mFrameQueue.size() ); for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ ) { //const VideoFrame *frame = dynamic_cast<const VideoFrame *>(iter->get()); //FramePtr framePtr( *iter ); const FeedFrame *frame = (*iter).get(); if ( mWidth != inputWidth || mHeight != inputHeight ) { // Requires conversion Debug( 1,"%s / Provider: %s, Source: %s, Frame: %p (%ju / %.3f) - %lu", cname(), frame->provider()->cidentity(), frame->originator()->cidentity(), frame, frame->id(), frame->age(), frame->buffer().size() ); avpicture_fill( (AVPicture *)inputFrame, frame->buffer().data(), pixelFormat, inputWidth, inputHeight ); // Reformat the input frame to fit the desired output format if ( sws_scale( mScaleContext, inputFrame->data, 
inputFrame->linesize, 0, inputHeight, outputFrame->data, outputFrame->linesize ) < 0 ) Fatal( "Unable to convert input frame (%dx%d) to output frame (%dx%d) at frame %ju", inputWidth, inputHeight, mWidth, mHeight, mFrameCount ); VideoFrame *videoFrame = new VideoFrame( this, *iter, mFrameCount, frame->timestamp(), outputBuffer ); distributeFrame( FramePtr( videoFrame ) ); } else { // Send it out 'as is' distributeFrame( *iter ); } //delete *iter; mFrameCount++; } mFrameQueue.clear(); } mQueueMutex.unlock(); checkProviders(); // Quite short so we can always keep up with the required packet rate for 25/30 fps usleep( INTERFRAME_TIMEOUT ); } FeedProvider::cleanup(); FeedConsumer::cleanup(); sws_freeContext( mScaleContext ); mScaleContext = NULL; } av_free( outputFrame ); av_free( inputFrame ); return( !ended() ); }
/**
 * @brief Consumer thread loop: muxes encoded video into an output file while motion alarms are active.
 *
 * Two kinds of frames arrive on the queue: motion/analysis frames, which drive a small
 * alarm state machine (IDLE -> ALARM -> ALERT -> IDLE, plus an unused PREALARM state),
 * and encoded video frames, which are scanned for key frames and written to the container.
 * A new output file is opened on the first key frame while an alarm is active, and closed
 * on the first key frame after the state has returned to IDLE.
 *
 * @return 0 always
 */
int Mp4FileOutput::run()
{
    //const int MAX_EVENT_HEAD_AGE = 2; ///< Number of seconds of video before event to save
    const int MAX_EVENT_TAIL_AGE = 3; ///< Number of seconds of video after event to save

    typedef enum { IDLE, PREALARM, ALARM, ALERT } AlarmState;

    if ( waitForProviders() )
    {
        /* auto detect the output format from the name. default is mpeg. */
        AVOutputFormat *outputFormat = av_guess_format( mExtension.c_str(), NULL, NULL );
        if ( !outputFormat )
            Fatal( "Could not deduce output format from '%s'", mExtension.c_str() );

        //AVFormatContext *outputContext = openFile( outputFormat );
        AVFormatContext *outputContext = NULL;   // non-NULL only while a file is open

        double videoTimeOffset = 0.0L;           // seconds of video since the current file was opened
        uint64_t videoFrameCount = 0;            // video frames since the current file was opened (drives pts)
        AlarmState alarmState = IDLE;
        uint64_t alarmTime = 0;                  // timestamp of the most recent alarmed motion frame
        int eventCount = 0;                      // numbers the per-event snapshot images

        // NOTE(review): two identical nested while(!mStop) loops — the inner one only exits
        // when mStop is set, so the outer loop appears redundant; confirm before simplifying.
        while( !mStop )
        {
            while( !mStop )
            {
                mQueueMutex.lock();
                if ( !mFrameQueue.empty() )
                {
                    for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                    {
                        const FeedFrame *frame = iter->get();
                        Debug( 3, "Frame type %d", frame->mediaType() );
                        if ( frame->mediaType() == FeedFrame::FRAME_TYPE_VIDEO )
                        {
                            // This is an alarm detection frame
                            // NOTE(review): cast result is dereferenced without a NULL check — this
                            // assumes every frame on this branch really is a MotionFrame; verify.
                            const MotionFrame *motionFrame = dynamic_cast<const MotionFrame *>(frame);
                            //const VideoProvider *provider = dynamic_cast<const VideoProvider *>(frame->provider());
                            AlarmState lastAlarmState = alarmState;
                            uint64_t now = time64();
                            Debug( 3, "Motion frame, alarmed %d", motionFrame->alarmed() );
                            if ( motionFrame->alarmed() )
                            {
                                // Any alarmed frame (re)starts/extends the alarm window
                                alarmState = ALARM;
                                alarmTime = now;
                                if ( lastAlarmState == IDLE )
                                {
                                    // Create new event
                                    eventCount++;
                                    // Save a JPEG snapshot of the frame that triggered the event
                                    std::string path = stringtf( "%s/img-%s-%d-%ju.jpg", mLocation.c_str(), mName.c_str(), eventCount, motionFrame->id() );
                                    //Info( "PF:%d @ %dx%d", motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height() );
                                    Image image( motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height(), motionFrame->buffer().data() );
                                    image.writeJpeg( path.c_str() );
                                }
                            }
                            else if ( lastAlarmState == ALARM )
                            {
                                // Alarm just cleared — start the tail-recording period
                                alarmState = ALERT;
                            }
                            else if ( lastAlarmState == ALERT )
                            {
                                // In the tail period: drop back to IDLE once the alarm is old enough.
                                // NOTE(review): age() relative to alarmTime is negated here, which
                                // implies age() returns a negative value for times after alarmTime —
                                // confirm the sign convention against FeedFrame::age().
                                Debug( 3, "Frame age %.2lf", frame->age( alarmTime ) );
                                if ( (0.0l-frame->age( alarmTime )) > MAX_EVENT_TAIL_AGE )
                                    alarmState = IDLE;
                            }
                            else
                            {
                                alarmState = IDLE;
                            }
                            Debug( 3, "Alarm state %d (%d)", alarmState, lastAlarmState );
                        }
                        else
                        {
                            // Encoded video frame: scan its NAL units to detect key frames
                            bool keyFrame = false;
                            const uint8_t *startPos = h264StartCode( frame->buffer().head(), frame->buffer().tail() );
                            while ( startPos < frame->buffer().tail() )
                            {
                                // Step over the start code (zero bytes plus the terminating 0x01)
                                // so startPos lands on the NAL header byte
                                while( !*(startPos++) )
                                    ;
                                const uint8_t *nextStartPos = h264StartCode( startPos, frame->buffer().tail() );
                                int frameSize = nextStartPos-startPos;
                                unsigned char type = startPos[0] & 0x1F;   // NAL unit type (low 5 bits)
                                unsigned char nri = startPos[0] & 0x60;    // nal_ref_idc (bits 5-6)
                                Debug( 3, "Frame Type %d, NRI %d (%02x), %d bytes, ts %jd", type, nri>>5, startPos[0], frameSize, frame->timestamp() );
                                if ( type == NAL_IDR_SLICE )
                                    keyFrame = true;
                                startPos = nextStartPos;
                            }
                            // Advance the running time by one frame interval (num/den seconds)
                            videoTimeOffset += (double)mVideoParms.frameRate().num / mVideoParms.frameRate().den;
                            if ( keyFrame )
                            {
                                // We can do file opening/closing now — files must start/end on a key frame
                                if ( alarmState != IDLE && !outputContext )
                                {
                                    outputContext = openFile( outputFormat );
                                    videoTimeOffset = 0.0L;
                                    videoFrameCount = 0;
                                }
                                else if ( alarmState == IDLE && outputContext )
                                {
                                    closeFile( outputContext );
                                    outputContext = NULL;
                                }
                            }
                            /*if ( keyFrame && (videoTimeOffset >= mMaxLength) ) { closeFile( outputContext ); outputContext = openFile( outputFormat ); videoTimeOffset = 0.0L; videoFrameCount = 0; }*/
                            if ( outputContext )
                            {
                                // A file is open: wrap the frame bytes in a packet and mux it
                                AVStream *videoStream = outputContext->streams[0];
                                AVCodecContext *videoCodecContext = videoStream->codec;
                                AVPacket packet;
                                av_init_packet(&packet);
                                packet.flags |= keyFrame ? AV_PKT_FLAG_KEY : 0;
                                packet.stream_index = videoStream->index;
                                packet.data = (uint8_t*)frame->buffer().data();
                                packet.size = frame->buffer().size();
                                //packet.pts = packet.dts = AV_NOPTS_VALUE;
                                // Rescale the frame index from the configured frame rate into the
                                // codec's time base to produce monotonically increasing pts/dts
                                packet.pts = packet.dts = (videoFrameCount * mVideoParms.frameRate().num * videoCodecContext->time_base.den) / (mVideoParms.frameRate().den * videoCodecContext->time_base.num);
                                Info( "vfc: %ju, vto: %.2lf, kf: %d, pts: %jd", videoFrameCount, videoTimeOffset, keyFrame, packet.pts );
                                int result = av_interleaved_write_frame(outputContext, &packet);
                                if ( result != 0 )
                                    Fatal( "Error while writing video frame: %d", result );
                            }
                            videoFrameCount++;
                        }
                    }
                    mFrameQueue.clear();
                }
                mQueueMutex.unlock();
                checkProviders();
                usleep( INTERFRAME_TIMEOUT );
            }
        }
        // Close any file still open at shutdown so the container is finalized
        if ( outputContext )
            closeFile( outputContext );
    }
    cleanup();
    return 0;
}