int RemoteCameraRtsp::Capture( Image &image )
{
    while ( true )
    {
        buffer.clear();
        if ( !rtspThread->isRunning() )
            break;
        //if ( rtspThread->stopped() )
            //break;
        if ( rtspThread->getFrame( buffer ) )
        {
            Debug( 3, "Read frame %d bytes", buffer.size() );
            Debug( 4, "Address %p", buffer.head() );
            Hexdump( 4, buffer.head(), 16 );

            static AVFrame *tmp_picture = NULL;

            if ( !tmp_picture )
            {
                //if ( c->pix_fmt != pf )
                //{
                    tmp_picture = avcodec_alloc_frame();
                    if ( !tmp_picture )
                    {
                        Panic( "Could not allocate temporary opicture" );
                    }
                    int size = avpicture_get_size( PIX_FMT_RGB24, width, height);
                    uint8_t *tmp_picture_buf = (uint8_t *)malloc(size);
                    if (!tmp_picture_buf)
                    {
                        av_free( tmp_picture );
                        Panic( "Could not allocate temporary opicture" );
                    }
                    avpicture_fill( (AVPicture *)tmp_picture, tmp_picture_buf, PIX_FMT_RGB24, width, height );
                //}
            }

            if ( !buffer.size() )
                return( -1 );

            AVPacket packet;
            av_init_packet( &packet );
            int initialFrameCount = frameCount;
            while ( buffer.size() > 0 )
            {
                int got_picture = false;
                packet.data = buffer.head();
                packet.size = buffer.size();
                int len = avcodec_decode_video2( codecContext, picture, &got_picture, &packet );
                if ( len < 0 )
                {
                    if ( frameCount > initialFrameCount )
                    {
                        // Decoded at least one frame
                        return( 0 );
                    }
                    Error( "Error while decoding frame %d", frameCount );
                    Hexdump( ZM_DBG_ERR, buffer.head(), buffer.size()>256?256:buffer.size() );
                    buffer.clear();
                    continue;
                    //return( -1 );
                }
                Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
                //if ( buffer.size() < 400 )
                    //Hexdump( 0, buffer.head(), buffer.size() );

                if ( got_picture )
                {
                    /* the picture is allocated by the decoder. no need to free it */
                    Debug( 1, "Got picture %d", frameCount );

                    static struct SwsContext *img_convert_ctx = 0;

                    if ( !img_convert_ctx )
                    {
                        img_convert_ctx = sws_getCachedContext( NULL, codecContext->width, codecContext->height, codecContext->pix_fmt, width, height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL );
                        if ( !img_convert_ctx )
                            Panic( "Unable to initialise image scaling context" );
                    }

                    sws_scale( img_convert_ctx, picture->data, picture->linesize, 0, height, tmp_picture->data, tmp_picture->linesize );

                    image.Assign( width, height, colours, tmp_picture->data[0] );

                    frameCount++;

                    return( 0 );
                }
                else
                {
                    Warning( "Unable to get picture from frame" );
                }
                buffer -= len; // drop the bytes the decoder consumed from the front of the buffer
            }
        }
    }
    return( -1 );
}
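Most of these examples target the old FFmpeg API: avcodec_alloc_frame(), avpicture_get_size(), avpicture_fill() and the PIX_FMT_* constants were all deprecated and later removed. As a point of reference, here is a minimal sketch of the modern equivalents (av_frame_alloc() plus the libavutil image helpers), assuming FFmpeg 2.x or newer; alloc_rgb24_frame() itself is illustrative and not part of any example below.

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Allocate an RGB24 AVFrame backed by a manually managed buffer: the
 * modern replacement for avcodec_alloc_frame() + avpicture_fill(). */
static AVFrame *alloc_rgb24_frame(int width, int height)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    int size = av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
    uint8_t *buf = av_malloc(size);
    if (!buf) {
        av_frame_free(&frame);
        return NULL;
    }

    /* Point frame->data[] / frame->linesize[] into buf. */
    av_image_fill_arrays(frame->data, frame->linesize, buf,
                         AV_PIX_FMT_RGB24, width, height, 1);
    frame->width  = width;
    frame->height = height;
    frame->format = AV_PIX_FMT_RGB24;
    return frame;
}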
Example #2
bool VideoReaderUnit::OpenStreams(StreamSet* set) {
  // Setup FFMPEG.
  if (!ffmpeg_initialized_) {
    ffmpeg_initialized_ = true;
    av_register_all();
  }

  // Open video file.
  AVFormatContext* format_context = nullptr;
  if (avformat_open_input (&format_context, video_file_.c_str(), nullptr, nullptr) != 0) {
    LOG(ERROR) << "Could not open file: " << video_file_;
    return false;
  }

  if (avformat_find_stream_info(format_context, nullptr) < 0) {
    LOG(ERROR) << video_file_ << " is not a valid movie file.";
    return false;
  }

  // Get video stream index.
  video_stream_idx_ = -1;

  for (uint i = 0; i < format_context->nb_streams; ++i) {
    if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx_ = i;
      break;
    }
  }

  if (video_stream_idx_ < 0) {
    LOG(ERROR) << "Could not find video stream in " << video_file_;
    return false;
  }

  AVCodecContext* codec_context = format_context->streams[video_stream_idx_]->codec;
  AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);

  if (!codec) {
    LOG(ERROR) << "Unsupported codec for file " << video_file_;
    return false;
  }

  if (avcodec_open2(codec_context, codec, nullptr) < 0) {
    LOG(ERROR) << "Could not open codec";
    return false;
  }

  AVStream* av_stream = format_context->streams[video_stream_idx_];
  fps_ = av_q2d(av_stream->avg_frame_rate);
  LOG(INFO) << "Frame rate: " << fps_;

  // If av_q2d wasn't able to figure out the frame rate, default to 24.
  if (fps_ != fps_) {  // NaN check: fps_ != fps_ only holds when fps_ is NaN.
    LOG(WARNING) << "Can't figure out frame rate - Defaulting to 24";
    fps_ = 24;
  }

  // Limit to meaningful values. Sometimes avg_frame_rate.* holds garbage.
  if (fps_ < 5) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 5;
    fps_ = 5;
  }

  if (fps_ > 60) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 60;
    fps_ = 60;
  }

  bytes_per_pixel_ = PixelFormatToNumChannels(options_.pixel_format);
  frame_width_ = codec_context->width;
  frame_height_ = codec_context->height;

  switch (options_.downscale) {
    case VideoReaderOptions::DOWNSCALE_NONE:
      output_width_ = frame_width_;
      output_height_ = frame_height_;
      downscale_factor_ = 1.0f;
      break;

    case VideoReaderOptions::DOWNSCALE_BY_FACTOR:
      if (options_.downscale_factor > 1.0f) {
        LOG(ERROR) << "Only downscaling is supported.";
        return false;
      }

      downscale_factor_ = options_.downscale_factor;
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MIN_SIZE:
      downscale_factor_ = std::max(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MAX_SIZE:
      downscale_factor_ = std::min(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;
  }

  if (downscale_factor_ != 1.0) {
    LOG(INFO) << "Downscaling by factor " << downscale_factor_
              << " from " << frame_width_ << ", " << frame_height_
              << " to " << output_width_ << ", " << output_height_;
  }

  // Force even resolutions.
  output_width_ += output_width_ % 2;
  output_width_step_ = output_width_ * bytes_per_pixel_;

  // Pad width_step to be a multiple of 4.
  if (output_width_step_ % 4 != 0) {
    output_width_step_ += 4 - output_width_step_ % 4;
    DCHECK_EQ(output_width_step_ % 4, 0);
  }
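  // Worked example (hypothetical numbers): output_width_ = 333 becomes 334
  // after forcing even; with bytes_per_pixel_ = 3 that gives
  // output_width_step_ = 334 * 3 = 1002, padded up to 1004 (the next
  // multiple of 4).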

  // Save some infos for later use.
  codec_ = codec;
  codec_context_ = codec_context;
  format_context_ = format_context;

  // Allocate temporary structures.
  frame_yuv_ = av_frame_alloc();
  frame_bgr_ = av_frame_alloc();

  if (!frame_yuv_ || !frame_bgr_) {
    LOG(ERROR) << "Could not allocate AVFrames.";
    return false;
  }

  int pix_fmt;
  switch (options_.pixel_format) {
    case PIXEL_FORMAT_RGB24:
      pix_fmt = PIX_FMT_RGB24;
      break;
    case PIXEL_FORMAT_BGR24:
      pix_fmt = PIX_FMT_BGR24;
      break;
    case PIXEL_FORMAT_ARGB32:
      pix_fmt = PIX_FMT_ARGB;
      break;
    case PIXEL_FORMAT_ABGR32:
      pix_fmt = PIX_FMT_ABGR;
      break;
    case PIXEL_FORMAT_RGBA32:
      pix_fmt = PIX_FMT_RGBA;
      break;
    case PIXEL_FORMAT_BGRA32:
      pix_fmt = PIX_FMT_BGRA;
      break;
    case PIXEL_FORMAT_YUV422:
      pix_fmt = PIX_FMT_YUYV422;
      break;
    case PIXEL_FORMAT_LUMINANCE:
      pix_fmt = PIX_FMT_GRAY8;
      break;
    default:
      LOG(ERROR) << "Unsupported pixel format.";
      return false;
  }

  uint8_t* bgr_buffer = (uint8_t*)av_malloc(avpicture_get_size((::PixelFormat)pix_fmt,
                                                               output_width_,
                                                               output_height_));

  avpicture_fill((AVPicture*)frame_bgr_,
                 bgr_buffer,
                 (::PixelFormat)pix_fmt,
                 output_width_,
                 output_height_);

  // Setup SwsContext for color conversion.
  sws_context_ = sws_getContext(frame_width_,
                                frame_height_,
                                codec_context_->pix_fmt,
                                output_width_,
                                output_height_,
                                (::PixelFormat)pix_fmt,
                                SWS_BICUBIC,
                                nullptr,
                                nullptr,
                                nullptr);
  if(!sws_context_) {
    LOG(ERROR) << "Could not setup SwsContext for color conversion.";
    return false;
  }

  current_pos_ = 0;
  used_as_root_ = set->empty();
  VideoStream* vid_stream = new VideoStream(output_width_,
                                            output_height_,
                                            output_width_step_,
                                            fps_,
                                            options_.pixel_format,
                                            options_.stream_name);

  vid_stream->set_original_width(frame_width_);
  vid_stream->set_original_height(frame_height_);

  set->push_back(shared_ptr<VideoStream>(vid_stream));
  frame_num_ = 0;
  return true;
}
Example #3
// --------------------------------------------------------------------------
// ARDrone::initVideo()
// Description  : Initialize video.
// Return value : SUCCESS: 1  FAILURE: 0
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Convert it to BGR
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        //pCodecCtx = avcodec_alloc_context();
        pCodecCtx=avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
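Teardown has to undo each allocation made above. The matching routine is not shown in this excerpt, so the following is only a hedged sketch: member names follow the code above, but the function itself is an assumption, not the original source.

// Illustrative counterpart to ARDrone::initVideo() (hypothetical).
void ARDrone::finalizeVideo(void)
{
    // Stop the capture thread first so nothing touches the buffers below.
    if (threadVideo) {
        pthread_cancel(*threadVideo);
        pthread_join(*threadVideo, NULL);
        delete threadVideo; threadVideo = NULL;
    }
    if (mutexVideo) {
        pthread_mutex_destroy(mutexVideo);
        delete mutexVideo; mutexVideo = NULL;
    }
    if (img)         cvReleaseImage(&img);
    if (pConvertCtx) sws_freeContext(pConvertCtx);
    if (bufferBGR)   av_free(bufferBGR);
    if (pFrameBGR)   av_free(pFrameBGR);
    if (pFrame)      av_free(pFrame);
    if (pCodecCtx)   avcodec_close(pCodecCtx);
    if (pFormatCtx)  avformat_close_input(&pFormatCtx);
}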
Example #4
	VideoStream::VideoStream(const std::string& filename, unsigned int numFrameBuffered, GLenum minFilter, GLenum magFilter, GLenum sWrapping, GLenum tWrapping, int maxLevel)
	 : __ReadOnly_ComponentLayout(declareLayout(numFrameBuffered)), InputDevice(declareLayout(numFrameBuffered), "Reader"), idVideoStream(0), readFrameCount(0), timeStampFrameRate(1.0f), timeStampOffset(0), timeStampOfLastFrameRead(0), endReached(false),
	   pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), pFrameRGB(NULL), buffer(NULL), pSWSCtx(NULL), idCurrentBufferForWritting(0)
	{
		#ifdef __USE_PBO__
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using PBO for uploading data to the GPU." << std::endl;
			#endif
			pbo = NULL;
		#else
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using standard method HdlTexture::write for uploading data to the GPU." << std::endl;
			#endif
		#endif

		int retCode = 0;

		// Open stream :
		//DEPRECATED IN libavformat : retCode = av_open_input_file(&pFormatCtx, filename.c_str(), NULL, 0, NULL)!=0);
		retCode = avformat_open_input(&pFormatCtx, filename.c_str(), NULL, NULL);

		if(retCode!=0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_open_input_file).", __FILE__, __LINE__);

		// Find stream information :
		//DEPRECATED : retCode = av_find_stream_info(pFormatCtx);
		retCode = avformat_find_stream_info(pFormatCtx, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_find_stream_info).", __FILE__, __LINE__);

		// Walk through pFormatCtx->nb_streams to find a/the first video stream :
		for(idVideoStream=0; idVideoStream<pFormatCtx->nb_streams; idVideoStream++)
			//DEPRECATED : if(pFormatCtx->streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO)
			if(pFormatCtx->streams[idVideoStream]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
				break;

		if(idVideoStream>=pFormatCtx->nb_streams)
			throw Exception("VideoStream::VideoStream - Failed to find video stream (at streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO).", __FILE__, __LINE__);

		// Get a pointer to the codec context for the video stream :
		pCodecCtx = pFormatCtx->streams[idVideoStream]->codec;

		// Find the decoder for the video stream :
		pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

		if(pCodec==NULL)
			throw Exception("VideoStream::VideoStream - No suitable codec found (at avcodec_find_decoder).", __FILE__, __LINE__);

		// Open codec :
		//DEPRECATED : retCode = avcodec_open(pCodecCtx, pCodec);
		retCode = avcodec_open2(pCodecCtx, pCodec, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Could not open codec (at avcodec_open).", __FILE__, __LINE__);

		// Get the framerate :
		/*float timeUnit_sec = static_cast<float>(pCodecCtx->time_base.num)/static_cast<float>(pCodecCtx->time_base.den);
		frameRate = 1.0f/(pCodecCtx->timeUnit_sec;*/

		timeStampFrameRate = static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.den)/static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.num);

		// Get the duration :
		duration_sec = pFormatCtx->duration / AV_TIME_BASE;

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream" << std::endl;
			std::cout << "                         - Frame rate : " << timeStampFrameRate << " frames per second (for time stamps)" << std::endl;
			std::cout << "                         - Duration   : " << duration_sec << " second(s)" << std::endl;
		#endif

		// Allocate video frame :
		pFrame = avcodec_alloc_frame();

		if(pFrame==NULL)
			throw Exception("VideoStream::VideoStream - Failed to allocate video frame (at avcodec_alloc_frame).", __FILE__, __LINE__);

		// Allocate an AVFrame structure :
		pFrameRGB = avcodec_alloc_frame();

		if(pFrameRGB==NULL)
			throw Exception("VideoStream::VideoStream - Failed to allocate RGB frame (at avcodec_alloc_frame).", __FILE__, __LINE__);

		// Determine required buffer size and allocate buffer :
		bufferSizeBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
		buffer = (uint8_t *)av_malloc(bufferSizeBytes*sizeof(uint8_t));

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream - Frame size : " << pCodecCtx->width << "x" << pCodecCtx->height << std::endl;
		#endif

		if(buffer==NULL)
			throw Exception("VideoStream::VideoStream - Unable to allocate video frame buffer.", __FILE__, __LINE__);

		// Assign appropriate parts of buffer to image planes in pFrameRGB (Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture) :
		avpicture_fill( (AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

		// Initialize libswscale :
		pSWSCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_POINT, NULL, NULL, NULL);

		// Create format :
		HdlTextureFormat frameFormat(pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, minFilter, magFilter, sWrapping, tWrapping, 0, maxLevel);

		// Create the texture :
		for(unsigned int i=0; i<numFrameBuffered; i++)
		{
			//old : addOutputPort("output" + to_string(i));
			textureBuffers.push_back( new HdlTexture(frameFormat) );

			// YOU MUST WRITE ONCE IN THE TEXTURE BEFORE USING PBO::copyToTexture ON IT.
			// We are also doing this to prevent reading from an empty (not-yet-allocated) texture.
			textureBuffers.back()->fill(0);

			// Set links :
			setTextureLink(textureBuffers.back(), i);
		}

		#ifdef __USE_PBO__
			// Create PBO for uploading data to GPU :
			pbo = new HdlPBO(frameFormat, GL_PIXEL_UNPACK_BUFFER_ARB,GL_STREAM_DRAW_ARB);
		#endif

		// Finish by forcing read of first frame :
		readNextFrame();
	}
Example #5
void init_encode(uint8_t *imgbuffer, char *palette)
{
  /* register all the codecs */
  //avcodec_register_all();
  REGISTER_ENCODER(MJPEG, mjpeg);
  REGISTER_PARSER(MJPEG, mjpeg);

  //set the buffer with the captured frame
  inbuffer = imgbuffer;

  //set pixel format (use strcmp from <string.h>: comparing a char* with ==
  //tests pointer identity, not string contents)
  if (strcmp(palette, "BGR32") == 0) {
    raw_pix_fmt = AV_PIX_FMT_BGR32;
  } else if (strcmp(palette, "RGB24") == 0) {
    raw_pix_fmt = AV_PIX_FMT_RGB24;
  } else if (strcmp(palette, "RGB32") == 0) {
    raw_pix_fmt = AV_PIX_FMT_RGB32;
  } else if (strcmp(palette, "YUYV") == 0) {
    raw_pix_fmt = AV_PIX_FMT_YUYV422;
  } else if (strcmp(palette, "YUV420") == 0) {
    raw_pix_fmt = AV_PIX_FMT_YUV420P;
  } else if (strcmp(palette, "GREY") == 0) {
    raw_pix_fmt = AV_PIX_FMT_GRAY8;
  } else {
    raw_pix_fmt = AV_PIX_FMT_BGR24;  // default!
  }

  //calculate the bytes needed for the output image
  int nbytes = avpicture_get_size(YUV_PIX_FMT, out_width, out_height);

  //create buffer for the output image
  outbuffer = (uint8_t*)av_malloc(nbytes);

  //create ffmpeg frame structures.  These do not allocate space for image data,
  //just the pointers and other information about the image.
  inpic  = avcodec_alloc_frame();
  outpic = avcodec_alloc_frame();

  //this will set the pointers in the frame structures to the right points in
  //the input and output buffers.
  avpicture_fill((AVPicture*)inpic,  inbuffer,  raw_pix_fmt, in_width,  in_height);
  avpicture_fill((AVPicture*)outpic, outbuffer, YUV_PIX_FMT, out_width, out_height);

  //create the conversion context
  sws_ctx = sws_getContext(in_width,  in_height,  raw_pix_fmt,
                              out_width, out_height, YUV_PIX_FMT,
                              SWS_FAST_BILINEAR, NULL, NULL, NULL);

  /* find the mjpeg video encoder */
  codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
  if (!codec) {
      fprintf(stderr, "encode.c: codec not found\n");
      exit(1);
  }

  //  Allocate/init a context
  c = avcodec_alloc_context3(codec);
  if (!c) {
      fprintf(stderr, "encode.c: could not allocate video codec context\n");
      exit(1);
  }

  /* put sample parameters */
  c->bit_rate = 400000;
  /* resolution must be a multiple of two */
  c->width = 320;
  c->height = 240;
  /* frames per second */
  c->time_base = (AVRational){1,25};
  c->pix_fmt = JPG_PIX_FMT;

  init_ok = 1;
}
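init_encode() stops short of opening the codec and compressing anything. Below is a minimal sketch of the companion encode step, assuming avcodec_open2(c, codec, NULL) has been called once after init_encode() and reusing the globals above; encode_frame() itself is an assumption, not part of the original source.

/* Convert the captured raw frame and encode it as one JPEG packet.
 * On success returns the packet size; the caller owns pkt's data. */
int encode_frame(AVPacket *pkt)
{
  if (!init_ok)
    return -1;

  /* raw capture format -> YUV for the MJPEG encoder */
  sws_scale(sws_ctx, (const uint8_t * const *)inpic->data, inpic->linesize,
            0, in_height, outpic->data, outpic->linesize);

  av_init_packet(pkt);
  pkt->data = NULL;   /* let the encoder allocate the output buffer */
  pkt->size = 0;

  int got_packet = 0;
  if (avcodec_encode_video2(c, pkt, outpic, &got_packet) < 0)
    return -1;
  return got_packet ? pkt->size : 0;
}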
Example #6
int FfmpegCamera::Capture( Image &image )
{
    if (!mCanCapture){
        return -1;
    }
    
    // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread.
    if (mReopenThread != 0) {
        void *retval = 0;
        int ret;
        
        ret = pthread_tryjoin_np(mReopenThread, &retval);
        if (ret != 0){
            Error("Could not join reopen thread.");
        } else {
            Info( "Successfully reopened stream." );
        }
        mReopenThread = 0;
    }

    AVPacket packet;
    uint8_t* directbuffer;

    /* Request a writeable buffer of the target image */
    directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
    if(directbuffer == NULL) {
        Error("Failed requesting writeable buffer for the captured image.");
        return (-1);
    }
    
    int frameComplete = false;
    while ( !frameComplete )
    {
        int avResult = av_read_frame( mFormatContext, &packet );
        if ( avResult < 0 )
        {
            char errbuf[AV_ERROR_MAX_STRING_SIZE];
            av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
            if (
                // Check if EOF.
                (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
                // Check for Connection failure.
                (avResult == -110)
            )
            {
                Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf);
                ReopenFfmpeg();
            }

            Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf );
            return( -1 );
        }
        Debug( 5, "Got packet from stream %d", packet.stream_index );
        if ( packet.stream_index == mVideoStreamId )
        {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 25, 0)
			if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
#else
			if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
#endif
                Fatal( "Unable to decode frame at frame %d", frameCount );

            Debug( 4, "Decoded video packet at frame %d", frameCount );

            if ( frameComplete )
            {
                Debug( 3, "Got frame %d", frameCount );

                avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);

#if HAVE_LIBSWSCALE
                if(mConvertContext == NULL) {
                    if(config.cpu_extensions && sseversion >= 20) {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
                    } else {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
                    }
                    if(mConvertContext == NULL)
                        Fatal( "Unable to create conversion context for %s", mPath.c_str() );
                }

                if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
                    Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
                Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
 
                frameCount++;
            }
        }
        av_free_packet( &packet );
    }
    return (0);
}
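One note on the error handling above: the hard-coded -110 is AVERROR(ETIMEDOUT) on Linux (errno 110), so that branch only catches connection timeouts on that platform. A hedged, portable spelling:

#include <errno.h>
#include <libavutil/error.h>

/* Portable equivalent of the magic "avResult == -110" timeout check. */
static int is_timeout_error(int averror)
{
    return averror == AVERROR(ETIMEDOUT);
}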
Example #7
/*!
  Allocates and initializes the parameters depending on the video and the desired color type.
  Once the stream is opened, it is possible to get the video encoding framerate with getFramerate(),
  and the dimension of the images using getWidth() and getHeight().
  
  \param filename : Path to the video which has to be read.
  \param colortype : Desired color map used to open the video.
  The parameter can take two values : COLORED and GRAY_SCALED.
  
  \return It returns true if the parameters could be initialized, false otherwise.
*/
bool vpFFMPEG::openStream(const char *filename, vpFFMPEGColorType colortype)
{
  this->color_type = colortype;
  
  av_register_all();
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,0,0) // libavformat 52.84.0
  if (av_open_input_file (&pFormatCtx, filename, NULL, 0, NULL) != 0)
#else
  if (avformat_open_input (&pFormatCtx, filename, NULL, NULL) != 0) // libavformat 53.4.0
#endif
  {
    vpTRACE("Couldn't open file ");
    return false;
  }

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,21,0) // libavformat 53.21.0
  if (av_find_stream_info (pFormatCtx) < 0)
#else 
  if (avformat_find_stream_info (pFormatCtx, NULL) < 0)
#endif
      return false;
  
  videoStream = 0;
  bool found_codec = false;
  
  /*
  * Detect streams types
  */
  for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
  {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51,0,0)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) // avutil 50.33.0
#else
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) // avutil 51.9.1
#endif
    {
      videoStream = i;
      //std::cout << "rate: " << pFormatCtx->streams[i]->r_frame_rate.num << " " << pFormatCtx->streams[i]->r_frame_rate.den << std::endl;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55,12,0)
      framerate_stream =  pFormatCtx->streams[i]->r_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
      framerate_stream =  pFormatCtx->streams[i]->avg_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
      found_codec= true;
      break;
    }
  }

  if (found_codec)
  {
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if (pCodec == NULL)
    {
      vpTRACE("unsuported codec");
      return false;		// Codec not found
    }
    
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
    if (avcodec_open (pCodecCtx, pCodec) < 0)
#else
    if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0)
#endif
    {
      vpTRACE("Could not open codec");
      return false;		// Could not open codec
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    pFrame = avcodec_alloc_frame();
#else
    pFrame = av_frame_alloc(); // libavcodec 55.34.1
#endif

    if (color_type == vpFFMPEG::COLORED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameRGB=avcodec_alloc_frame();
#else
      pFrameRGB=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameRGB == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
    }
    
    else if (color_type == vpFFMPEG::GRAY_SCALED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameGRAY=avcodec_alloc_frame();
#else
      pFrameGRAY=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameGRAY == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_GRAY8,pCodecCtx->width,pCodecCtx->height);
    }  

    /*
     * Determine required buffer size and allocate buffer
     */
    width = pCodecCtx->width ;
    height = pCodecCtx->height ;
    buffer = (uint8_t *) malloc ((unsigned int)(sizeof (uint8_t)) * (unsigned int)numBytes);
  }
  else
  {
    vpTRACE("Didn't find a video stream");
    return false;
  }
  
  if (color_type == vpFFMPEG::COLORED)
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
  
  else if (color_type == vpFFMPEG::GRAY_SCALED)
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
  
  streamWasOpen = true;

  return true;
}
Example #8
static int video_open(video_t *video, const char *filename) {
    video->format = PIX_FMT_RGB24;
    if (avformat_open_input(&video->format_context, filename, NULL, NULL) ||
            avformat_find_stream_info(video->format_context, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open video stream %s\n"), filename);
        goto failed;
    }

    video->stream_idx = -1;
    for (int i = 0; i < video->format_context->nb_streams; i++) {
        if (video->format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video->stream_idx = i;
            break;
        }
    }

    if (video->stream_idx == -1) {
        fprintf(stderr, ERROR("cannot find video stream\n"));
        goto failed;
    }

    AVStream *stream = video->format_context->streams[video->stream_idx];
    video->codec_context = stream->codec;
    video->codec = avcodec_find_decoder(video->codec_context->codec_id);

    /* Save Width/Height */
    video->width = video->codec_context->width;
    video->height = video->codec_context->height;

	
    if (!video->codec || avcodec_open2(video->codec_context, video->codec, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open codec\n"));
        goto failed;
    }

    video->buffer_width = video->codec_context->width;
    video->buffer_height = video->codec_context->height;

    fprintf(stderr, INFO("pixel aspect ratio: %d/%d, size: %dx%d buffer size: %dx%d\n"), 
        video->codec_context->sample_aspect_ratio.num,
        video->codec_context->sample_aspect_ratio.den,
        video->width,
        video->height,
        video->buffer_width,
        video->buffer_height
    );

    video->par = (float)video->codec_context->sample_aspect_ratio.num / video->codec_context->sample_aspect_ratio.den;
    if (video->par == 0)
        video->par = 1;

    /* Frame rate fix for some codecs */
    if (video->codec_context->time_base.num > 1000 && video->codec_context->time_base.den == 1)
        video->codec_context->time_base.den = 1000;

    /* Get FPS */
    // http://libav-users.943685.n4.nabble.com/Retrieving-Frames-Per-Second-FPS-td946533.html
    if ((stream->time_base.den != stream->r_frame_rate.num) ||
            (stream->time_base.num != stream->r_frame_rate.den)) {
        video->fps = 1.0 / stream->r_frame_rate.den * stream->r_frame_rate.num;
    } else {
        video->fps = 1.0 / stream->time_base.num * stream->time_base.den;
    }
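    /* Example: an H.264 stream with time_base 1/90000 and r_frame_rate 25/1
     * takes the first branch (90000 != 25), giving fps = 25. A stream whose
     * time_base equals the frame duration, e.g. 1/25 against 25/1, falls
     * through to the second branch with the same result. */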
    fprintf(stderr, INFO("fps: %lf\n"), video->fps);

    /* Get framebuffers */
    video->raw_frame = avcodec_alloc_frame();
    video->scaled_frame = avcodec_alloc_frame();

    if (!video->raw_frame || !video->scaled_frame) {
        fprintf(stderr, ERROR("cannot preallocate frames\n"));
        goto failed;
    }

    /* Create data buffer */
    video->buffer = av_malloc(avpicture_get_size(
        video->format, 
        video->buffer_width, 
        video->buffer_height
    ));

    /* Init buffers */
    avpicture_fill(
        (AVPicture *) video->scaled_frame, 
        video->buffer, 
        video->format, 
        video->buffer_width, 
        video->buffer_height
    );

    /* Init scale & convert */
    video->scaler = sws_getContext(
        video->buffer_width,
        video->buffer_height,
        video->codec_context->pix_fmt,
        video->buffer_width, 
        video->buffer_height, 
        video->format, 
        SWS_BICUBIC, 
        NULL, 
        NULL, 
        NULL
    );

    if (!video->scaler) {
        fprintf(stderr, ERROR("scale context init failed\n"));
        goto failed;
    }

    /* Give some info on stderr about the file & stream */
    av_dump_format(video->format_context, 0, filename, 0);
    return 1;
failed:
    video_free(video);
    return 0;
}
Example #9
/**
 * \brief copy frame data from buffer to AVFrame, handling stride.
 * \param f destination AVFrame
 * \param src source buffer, tightly packed (no line stride)
 * \param width width of the video frame
 * \param height height of the video frame
 */
static void copy_frame(AVFrame *f, const uint8_t *src,
                       int width, int height) {
    AVPicture pic;
    avpicture_fill(&pic, src, PIX_FMT_YUV420P, width, height);
    av_picture_copy((AVPicture *)f, &pic, PIX_FMT_YUV420P, width, height);
}
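A hedged usage sketch for copy_frame(), using the same era of API as the helper itself: the destination frame is built with avpicture_fill() so its planes are valid before the copy. frame_from_packed_yuv() is illustrative only, not part of the original source.

/* Build an AVFrame from a tightly packed YUV420P buffer (size w*h*3/2).
 * copy_frame() matters most when the destination has padded linesizes,
 * e.g. a decoder-owned frame; here it degenerates to a plain copy. */
static AVFrame *frame_from_packed_yuv(const uint8_t *src, int w, int h)
{
    AVFrame *f = avcodec_alloc_frame();
    uint8_t *buf = av_malloc(avpicture_get_size(PIX_FMT_YUV420P, w, h));
    if (!f || !buf) {
        av_free(buf);
        av_free(f);
        return NULL;
    }
    avpicture_fill((AVPicture *)f, buf, PIX_FMT_YUV420P, w, h);
    copy_frame(f, src, w, h);
    return f;
}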
Example #10
//Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); //webcam video capture

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) //As long as the user has not started the selection!
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // new capture
			if (!bSuccess) {
				cout << "Cannot read the video stream" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) //Display the user's selection live
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) //The user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				//Trackbar for choosing the color
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				//Trackbar for saturation, compared against white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				//Trackbar for brightness, compared against black
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); //Convert from BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); //Black out the parts outside the color range chosen by the user

				//Remove noise
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				//Compute the "distance" to the target; used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			//Extract the interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	//End of initialization: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;
}
Example #11
int main(int argc, char *argv[]) {
    av_register_all();

    AVFormatContext *pFormatCtx = NULL;
    openVideoFile(&pFormatCtx, argv[1]);

    av_dump_format(pFormatCtx, 0, argv[1], 0);

    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    // Open codec
    openCodecAndCtx(pFormatCtx, &pCodecCtx, &pCodec);

    // Copy context
    /*
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Counldn't copy codec context");
        return -1;
    }
    */
    AVFrame *pFrame = NULL;

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Allocate an AVFrame structure
    AVFrame *pFrameRGB = av_frame_alloc();
    if (pFrameRGB == NULL)
        return -1;

    uint8_t *buffer = NULL;
    int numBytes;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
            pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
            pCodecCtx->width, pCodecCtx->height);

    struct SwsContext *sws_ctx = NULL;
    int frameFinished;
    AVPacket packet;
    sws_ctx = sws_getContext(pCodecCtx->width,
            pCodecCtx->height,
            pCodecCtx->pix_fmt,
            pCodecCtx->width,
            pCodecCtx->height,
            PIX_FMT_RGB24,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL
        );

    int i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                sws_scale(sws_ctx, (uint8_t const * const*)pFrame->data,
                        pFrame->linesize, 0, pCodecCtx->height,
                        pFrameRGB->data, pFrameRGB->linesize);
                ++i;
                if (i <= 240 && i >= 230) {
                    SaveFrame(pFrameRGB, pCodecCtx->width,
                            pCodecCtx->height, i);
                }
            }
        }
        // Free the packet that was allocated by av_read_frame.
        av_free_packet(&packet);
    }

    av_free(buffer);
    av_free(pFrameRGB);

    av_free(pFrame);

    avcodec_close(pCodecCtx);

    avformat_close_input(&pFormatCtx);

    return 0;
}
Example #12
static void ffmpeg_postprocess(struct anim *anim)
{
    AVFrame *input = anim->pFrame;
    ImBuf *ibuf = anim->last_frame;
    int filter_y = 0;

    if (!anim->pFrameComplete) {
        return;
    }

    /* This means the data wasn't read properly;
     * this check prevents crashing */
    if (input->data[0] == 0 && input->data[1] == 0 &&
            input->data[2] == 0 && input->data[3] == 0)
    {
        fprintf(stderr, "ffmpeg_fetchibuf: "
                "data not read properly...\n");
        return;
    }

    av_log(anim->pFormatCtx, AV_LOG_DEBUG,
           "  POSTPROC: anim->pFrame planes: %p %p %p %p\n",
           input->data[0], input->data[1], input->data[2],
           input->data[3]);


    if (anim->ib_flags & IB_animdeinterlace) {
        if (avpicture_deinterlace(
                    (AVPicture *)
                    anim->pFrameDeinterlaced,
                    (const AVPicture *)
                    anim->pFrame,
                    anim->pCodecCtx->pix_fmt,
                    anim->pCodecCtx->width,
                    anim->pCodecCtx->height) < 0)
        {
            filter_y = true;
        }
        else {
            input = anim->pFrameDeinterlaced;
        }
    }

    if (!need_aligned_ffmpeg_buffer(anim)) {
        avpicture_fill((AVPicture *) anim->pFrameRGB,
                       (unsigned char *) ibuf->rect,
                       AV_PIX_FMT_RGBA, anim->x, anim->y);
    }

    if (ENDIAN_ORDER == B_ENDIAN) {
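        /* sws_scale below writes the image top-down into ibuf->rect; the
         * row-swap loop that follows flips it vertically in place, since
         * ImBuf stores images bottom-up. The little-endian else-branch
         * avoids that extra pass by scaling with a negative stride. */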
        int *dstStride   = anim->pFrameRGB->linesize;
        uint8_t **dst     = anim->pFrameRGB->data;
        int dstStride2[4] = { dstStride[0], 0, 0, 0 };
        uint8_t *dst2[4]  = { dst[0], 0, 0, 0 };
        int x, y, h, w;
        unsigned char *bottom;
        unsigned char *top;

        sws_scale(anim->img_convert_ctx,
                  (const uint8_t *const *)input->data,
                  input->linesize,
                  0,
                  anim->y,
                  dst2,
                  dstStride2);

        bottom = (unsigned char *) ibuf->rect;
        top = bottom + ibuf->x * (ibuf->y - 1) * 4;

        h = (ibuf->y + 1) / 2;
        w = ibuf->x;

        for (y = 0; y < h; y++) {
            unsigned char tmp[4];
            unsigned int *tmp_l =
                (unsigned int *) tmp;

            for (x = 0; x < w; x++) {
                tmp[0] = bottom[0];
                tmp[1] = bottom[1];
                tmp[2] = bottom[2];
                tmp[3] = bottom[3];

                bottom[0] = top[0];
                bottom[1] = top[1];
                bottom[2] = top[2];
                bottom[3] = top[3];

                *(unsigned int *) top = *tmp_l;

                bottom += 4;
                top += 4;
            }
            top -= 8 * w;
        }
    }
    else {
        int *dstStride   = anim->pFrameRGB->linesize;
        uint8_t **dst     = anim->pFrameRGB->data;
        int dstStride2[4] = { -dstStride[0], 0, 0, 0 };
        uint8_t *dst2[4]  = { dst[0] + (anim->y - 1) * dstStride[0],
                              0, 0, 0
                            };

        sws_scale(anim->img_convert_ctx,
                  (const uint8_t *const *)input->data,
                  input->linesize,
                  0,
                  anim->y,
                  dst2,
                  dstStride2);
    }

    if (need_aligned_ffmpeg_buffer(anim)) {
        uint8_t *src = anim->pFrameRGB->data[0];
        uint8_t *dst = (uint8_t *) ibuf->rect;
        for (int y = 0; y < anim->y; y++) {
            memcpy(dst, src, anim->x * 4);
            dst += anim->x * 4;
            src += anim->pFrameRGB->linesize[0];
        }
    }

    if (filter_y) {
        IMB_filtery(ibuf);
    }
}
Example #13
static int startffmpeg(struct anim *anim)
{
    int i, videoStream;

    AVCodec *pCodec;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVRational frame_rate;
    int frs_num;
    double frs_den;
    int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* The following for color space determination */
    int srcRange, dstRange, brightness, contrast, saturation;
    int *table;
    const int *inv_table;
#endif

    if (anim == NULL) return(-1);

    streamcount = anim->streamindex;

    if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    av_dump_format(pFormatCtx, 0, anim->name, 0);


    /* Find the video stream */
    videoStream = -1;

    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (streamcount > 0) {
                streamcount--;
                continue;
            }
            videoStream = i;
            break;
        }

    if (videoStream == -1) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    /* Find the decoder for the video stream */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx->workaround_bugs = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    frame_rate = av_get_r_frame_rate_compat(pFormatCtx->streams[videoStream]);
    if (pFormatCtx->streams[videoStream]->nb_frames != 0) {
        anim->duration = pFormatCtx->streams[videoStream]->nb_frames;
    }
    else {
        anim->duration = ceil(pFormatCtx->duration *
                              av_q2d(frame_rate) /
                              AV_TIME_BASE);
    }

    frs_num = frame_rate.num;
    frs_den = frame_rate.den;

    frs_den *= AV_TIME_BASE;

    while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
        frs_num /= 10;
        frs_den /= 10;
    }
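    /* Worked example: NTSC 30000/1001. After frs_den *= AV_TIME_BASE the
     * loop strips common factors of 10, leaving frs_num = 3 and
     * frs_den = 100100; dividing the AV_TIME_BASE scaling back out still
     * yields 3 / 0.1001, i.e. ~29.97 fps. */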

    anim->frs_sec = frs_num;
    anim->frs_sec_base = frs_den;

    anim->params = 0;

    anim->x = pCodecCtx->width;
    anim->y = av_get_cropped_height_from_codec(pCodecCtx);

    anim->pFormatCtx = pFormatCtx;
    anim->pCodecCtx = pCodecCtx;
    anim->pCodec = pCodec;
    anim->videoStream = videoStream;

    anim->interlacing = 0;
    anim->orientation = 0;
    anim->framesize = anim->x * anim->y * 4;

    anim->curposition = -1;
    anim->last_frame = 0;
    anim->last_pts = -1;
    anim->next_pts = -1;
    anim->next_packet.stream_index = -1;

    anim->pFrame = av_frame_alloc();
    anim->pFrameComplete = false;
    anim->pFrameDeinterlaced = av_frame_alloc();
    anim->pFrameRGB = av_frame_alloc();

    if (need_aligned_ffmpeg_buffer(anim)) {
        anim->pFrameRGB->format = AV_PIX_FMT_RGBA;
        anim->pFrameRGB->width  = anim->x;
        anim->pFrameRGB->height = anim->y;

        if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            avcodec_close(anim->pCodecCtx);
            avformat_close_input(&anim->pFormatCtx);
            av_frame_free(&anim->pFrameRGB);
            av_frame_free(&anim->pFrameDeinterlaced);
            av_frame_free(&anim->pFrame);
            anim->pCodecCtx = NULL;
            return -1;
        }
    }

    if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
    {
        fprintf(stderr,
                "ffmpeg has changed alloc scheme ... ARGHHH!\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

    if (anim->ib_flags & IB_animdeinterlace) {
        avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
                       MEM_callocN(avpicture_get_size(
                                       anim->pCodecCtx->pix_fmt,
                                       anim->pCodecCtx->width,
                                       anim->pCodecCtx->height),
                                   "ffmpeg deinterlace"),
                       anim->pCodecCtx->pix_fmt,
                       anim->pCodecCtx->width,
                       anim->pCodecCtx->height);
    }

    if (pCodecCtx->has_b_frames) {
        anim->preseek = 25; /* FIXME: detect gopsize ... */
    }
    else {
        anim->preseek = 0;
    }

    anim->img_convert_ctx = sws_getContext(
                                anim->x,
                                anim->y,
                                anim->pCodecCtx->pix_fmt,
                                anim->x,
                                anim->y,
                                AV_PIX_FMT_RGBA,
                                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                NULL, NULL, NULL);

    if (!anim->img_convert_ctx) {
        fprintf(stderr,
                "Can't transform color space??? Bailing out...\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* Try to detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
    if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
                                  &table, &dstRange, &brightness, &contrast, &saturation))
    {
        srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
        inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

        if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
                                     table, dstRange, brightness, contrast, saturation))
        {
            fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
        }
    }
    else {
        fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
    }
#endif

    return (0);
}
Example #14
static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
	int ret = 0;

#if HAVE_FFMPEG
	int size;
	tsk_bool_t send_idr, send_hdr;
#endif

	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;

	if(!self || !in_data || !in_size){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	if(!self->opened){
		TSK_DEBUG_ERROR("Codec not opened");
		return 0;
	}

	if(h264->encoder.passthrough) {
		tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), (const uint8_t*)in_data, in_size);
	}
	else { // !h264->encoder.passthrough
#if HAVE_FFMPEG		// wrap yuv420 buffer
		size = avpicture_fill((AVPicture *)h264->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h264->encoder.context->width, h264->encoder.context->height);
		if(size != in_size){
			/* guard */
			TSK_DEBUG_ERROR("Invalid size");
			return 0;
		}

		// send IDR for:
		//	- the first frame
		//	- remote peer requested an IDR
		//	- every second within the first 4 seconds
		send_idr = (
			h264->encoder.frame_count++ == 0
			|| h264->encoder.force_idr
			|| ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
		   );

		// send SPS and PPS headers for:
		//	- IDR frames (not required but it's the easiest way to deal with pkt loss)
		//	- every 5 seconds after the first 4 seconds
		send_hdr = (
			send_idr
			|| ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
			);
		if(send_hdr){
			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.context->extradata, (tsk_size_t)h264->encoder.context->extradata_size);
		}
	
		// Encode data
	#if LIBAVCODEC_VERSION_MAJOR <= 53
		h264->encoder.picture->pict_type = send_idr ? FF_I_TYPE : 0;
	#else
		h264->encoder.picture->pict_type = send_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
	#endif
		h264->encoder.picture->pts = AV_NOPTS_VALUE;
		h264->encoder.picture->quality = h264->encoder.context->global_quality;
		// h264->encoder.picture->pts = h264->encoder.frame_count; MUST NOT
		ret = avcodec_encode_video(h264->encoder.context, h264->encoder.buffer, size, h264->encoder.picture);	
		if(ret > 0){
			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.buffer, (tsk_size_t)ret);
		}
		h264->encoder.force_idr = tsk_false;
#endif
	}// else(!h264->encoder.passthrough)

	return 0;
}
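The cadence logic above is easy to sanity-check in isolation. A tiny standalone harness follows; it is an illustration only, assuming out.fps == 25 and ignoring force_idr and the post-increment of frame_count in the original condition.

#include <stdio.h>

int main(void)
{
	const int fps = 25;
	for (int frame_count = 0; frame_count < 300; ++frame_count) {
		int send_idr = (frame_count == 0) ||
		               ((frame_count < fps * 4) && (frame_count % fps == 0));
		int send_hdr = send_idr || (frame_count % (fps * 5) == 0);
		if (send_hdr)
			printf("frame %3d: idr=%d hdr=%d\n", frame_count, send_idr, send_hdr);
	}
	/* IDRs fire at frames 0, 25, 50, 75 (every second in the first 4 s);
	 * SPS/PPS headers additionally at 125 and 250 (every 5 s). */
	return 0;
}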
Example #15
static int icvOpenAVI_FFMPEG( CvCaptureAVI_FFMPEG* capture, const char* filename )
{
    int err, valid = 0, video_index = -1, i;
    AVFormatContext *ic;

    capture->ic = NULL;
    capture->video_stream = -1;
    capture->video_st = NULL;
    /* register all codecs, demux and protocols */
    av_register_all();

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0) {
	    CV_WARN("Error opening file");
	    goto exit_func;
    }
    capture->ic = ic;
    err = av_find_stream_info(ic);
    if (err < 0) {
	    CV_WARN("Could not find codec parameters");
	    goto exit_func;
    }
    for(i = 0; i < ic->nb_streams; i++) {
#if LIBAVFORMAT_BUILD > 4628
        AVCodecContext *enc = ic->streams[i]->codec;
#else
        AVCodecContext *enc = &ic->streams[i]->codec;
#endif
        AVCodec *codec;
        if( CODEC_TYPE_VIDEO == enc->codec_type && video_index < 0) {
            video_index = i;
            codec = avcodec_find_decoder(enc->codec_id);
            if (!codec || avcodec_open(enc, codec) < 0)
                goto exit_func;
            capture->video_stream = i;
            capture->video_st = ic->streams[i];
            capture->picture = avcodec_alloc_frame();

            capture->rgb_picture.data[0] = (uchar*)cvAlloc(
                                    avpicture_get_size( PIX_FMT_BGR24,
                                    enc->width, enc->height ));
            avpicture_fill( (AVPicture*)&capture->rgb_picture, capture->rgb_picture.data[0],
                    PIX_FMT_BGR24, enc->width, enc->height );

            cvInitImageHeader( &capture->frame, cvSize( enc->width,
                                       enc->height ), 8, 3, 0, 4 );
            cvSetData( &capture->frame, capture->rgb_picture.data[0],
                               capture->rgb_picture.linesize[0] );
            break;
        }
    }

    if(video_index >= 0)
        valid = 1;

exit_func:

    if( !valid )
        icvCloseAVI_FFMPEG( capture );

    return valid;
}
Example #16
void *encode_video_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int p = 0;
    int err;
    int got_packet;
    rtp_msg_t *s_video_msg;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t),1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width,
                   cs->webcam_decoder_ctx->height);
    cs->sws_ctx = sws_getContext(cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height,
                                 cs->webcam_decoder_ctx->pix_fmt, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height, PIX_FMT_YUV420P,
                                 SWS_BILINEAR, NULL, NULL, NULL);

    while (!cs->quit && cs->send_video) {

        if (av_read_frame(cs->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (cs->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == cs->video_stream) {
            if (avcodec_decode_video2(cs->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                av_free_packet(packet);
                continue;
            }

            av_free_packet(packet);
            sws_scale(cs->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      cs->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            ++p;

            if (p == 60) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_BI;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P;
            }

            if (video_frame_finished) {
                err = avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet);

                if (err < 0) {
                    printf("could not encode video frame\n");
                    continue;
                }

                if (!got_packet) {
                    continue;
                }

                pthread_mutex_lock(&cs->rtp_msg_mutex_lock);
                THREADLOCK()

                if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                s_video_msg = rtp_msg_new ( cs->_rtp_video, enc_video_packet.data, enc_video_packet.size ) ;

                if (!s_video_msg) {
                    printf("invalid message\n");
                }

                rtp_send_msg ( cs->_rtp_video, s_video_msg, cs->_networking );
                THREADUNLOCK()
                pthread_mutex_unlock(&cs->rtp_msg_mutex_lock);
                av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    /* release per-thread resources and exit */
    av_free(s_video_frame);
    av_free(webcam_frame);
    av_free(buffer);
    pthread_exit(NULL);
}
Example #17
bool VideoDecoder::Load()
{
  unsigned int i;
  int numBytes;
  uint8_t *tmp;

  if ( avformat_open_input(&mFormatContext, mFilename.c_str(), NULL, NULL) != 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_open_input_file failed\n");
    return false;
  }

  if ( avformat_find_stream_info(mFormatContext, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_find_stream_info failed\n");
    return false;
  }

  /* Some debug info */
  av_dump_format(mFormatContext, 0, mFilename.c_str(), false);

  for (i = 0; i < mFormatContext->nb_streams; i++)
  {
    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
    {
      mVideoStream = i;
      break;
    }
  }

  if ( mVideoStream == -1 )
  {
    fprintf(stderr, "VideoDecoder::Load - No video stream found.\n");
    return false;
  }

  mCodecContext = mFormatContext->streams[mVideoStream]->codec;
  mCodec = avcodec_find_decoder(mCodecContext->codec_id);
  if ( !mCodec )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_find_decoder failed\n");
    return false;
  }

  if ( avcodec_open2(mCodecContext, mCodec, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_open failed\n");
    return false;
  }

  mFrame = avcodec_alloc_frame();
  if ( !mFrame )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating frame.\n");
    return false;
  }

  mFrameRGB = avcodec_alloc_frame();
  if ( !mFrameRGB )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating RGB frame.\n");
    return false;
  }

  /* Determine required buffer size and allocate buffer */
  numBytes = avpicture_get_size(PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);
  tmp = (uint8_t *)realloc(mBuffer, numBytes * sizeof(uint8_t));
  if ( !tmp )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating buffer.\n");
    return false;
  }
  mBuffer = tmp;

  avpicture_fill((AVPicture *)mFrameRGB, mBuffer, PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);

  mSwsContext = sws_getContext(mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt,
                               mCodecContext->width, mCodecContext->height, PIX_FMT_RGB24,
                               SWS_BICUBIC, NULL, NULL, NULL);
  if ( !mSwsContext )
  {
    fprintf(stderr, "VideoDecoder::Load - sws_getContext failed.\n");
    return false;
  }

  mDoneReading = false;

  return true;
}
Example #18
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer = NULL;

  AVDictionary    *optionsDict = NULL;
  struct SwsContext      *sws_ctx = NULL;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  av_register_all();
  
  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=av_frame_alloc();
  
  // Allocate an AVFrame structure
  pFrameRGB=av_frame_alloc();
  if(pFrameRGB==NULL)
    return -1;
  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
                              pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );
  
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
  
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

      // Did we get a video frame?
      if(frameFinished) {
        // Convert the image from its native format to RGB
        sws_scale
        (
            sws_ctx,
            (uint8_t const * const *)pFrame->data,
            pFrame->linesize,
            0,
            pCodecCtx->height,
            pFrameRGB->data,
            pFrameRGB->linesize
        );

        // Save the frame to disk
        if(++i<=5)
          SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
  
  // Free the RGB image
  av_free(buffer);
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
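
SaveFrame() is called above but not defined in this listing. A minimal sketch of the usual definition (from the classic FFmpeg tutorial family these examples follow) writes each RGB24 frame as a binary PPM file; the frame%d.ppm naming is an assumption:

void SaveFrame(AVFrame *pFrameRGB, int width, int height, int iFrame) {
  // Sketch only: dump one RGB24 frame as a binary PPM image.
  char szFilename[32];
  sprintf(szFilename, "frame%d.ppm", iFrame);   // assumed naming scheme
  FILE *pFile = fopen(szFilename, "wb");
  if (pFile == NULL)
    return;
  // PPM header: magic number, dimensions, maximum channel value
  fprintf(pFile, "P6\n%d %d\n255\n", width, height);
  // Write row by row; linesize may be padded wider than width*3 bytes
  for (int y = 0; y < height; y++)
    fwrite(pFrameRGB->data[0] + y * pFrameRGB->linesize[0], 1, width * 3, pFile);
  fclose(pFile);
}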
Example #19
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,51,110) // libavcodec 54.51.100
bool vpFFMPEG::openEncoder(const char *filename, unsigned int w, unsigned int h, CodecID codec)
#else
bool vpFFMPEG::openEncoder(const char *filename, unsigned int w, unsigned int h, AVCodecID codec)
#endif
{
  av_register_all();

  /* find the mpeg1 video encoder */
  pCodec = avcodec_find_encoder(codec);
  if (pCodec == NULL) {
    fprintf(stderr, "codec not found\n");
    return false;
  }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,5,0) // libavcodec 53.5.0
  pCodecCtx = avcodec_alloc_context();
#else
  pCodecCtx = avcodec_alloc_context3(NULL);
#endif

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
  pFrame = avcodec_alloc_frame();
  pFrameRGB = avcodec_alloc_frame();
#else
  pFrame = av_frame_alloc(); // libavcodec 55.34.1
  pFrameRGB = av_frame_alloc(); // libavcodec 55.34.1
#endif

  /* put sample parameters */
  pCodecCtx->bit_rate = (int)bit_rate;
  /* resolution must be a multiple of two */
  pCodecCtx->width = (int)w;
  pCodecCtx->height = (int)h;
  this->width = (int)w;
  this->height = (int)h;
  /* frames per second */
  pCodecCtx->time_base.num = 1;
  pCodecCtx->time_base.den = framerate_encoder;
  pCodecCtx->gop_size = 10; /* emit one intra frame every ten frames */
  pCodecCtx->max_b_frames=1;
  pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

  /* open it */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
  if (avcodec_open (pCodecCtx, pCodec) < 0) {
#else
  if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0) {
#endif
    fprintf(stderr, "could not open codec\n");
    return false;
  }

  /* the codec gives us the frame size, in samples */

  f = fopen(filename, "wb");
  if (!f) {
    fprintf(stderr, "could not open %s\n", filename);
    return false;
  }

  outbuf_size = 100000;
  outbuf = new uint8_t[outbuf_size];

  numBytes = avpicture_get_size (PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height);
  picture_buf = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrame, picture_buf, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

  numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
  buffer = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

  img_convert_ctx= sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  
  encoderWasOpened = true;

  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<vpRGBa> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif
  fflush(stdout);
  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<unsigned char> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);  
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif

  fflush(stdout);
  return true;
}

/*!
  Ends the writing of the video and close the file.
  
  \return It returns true if the file was closed without problem
*/
bool vpFFMPEG::endWrite()
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  int ret = 1;
  while (ret != 0)
  {

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
    ret = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, NULL);
    fwrite(outbuf, 1, (size_t)ret, f);
#else
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    int got_output;
    ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
    if (ret < 0) {
      std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
      return false;
    }
    if (got_output) {
      fwrite(pkt.data, 1, pkt.size, f);
      av_free_packet(&pkt);
    }
#endif
  }

  /*The end of a mpeg file*/
  outbuf[0] = 0x00;
  outbuf[1] = 0x00;
  outbuf[2] = 0x01;
  outbuf[3] = 0xb7;
  fwrite(outbuf, 1, 4, f);
  fclose(f);
  f = NULL;
  return true;
}
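
Taken together, openEncoder(), saveFrame() and endWrite() form the whole write path. A minimal usage sketch, assuming the bit rate and frame rate members are already configured and that the frame source is provided elsewhere:

// Hypothetical driver; "frames" is an assumed, pre-filled frame source.
vpFFMPEG writer;
std::vector< vpImage<vpRGBa> > frames;
if (writer.openEncoder("output.mpg", 640, 480, AV_CODEC_ID_MPEG1VIDEO)) {
  for (size_t i = 0; i < frames.size(); i++)
    writer.saveFrame(frames[i]);   // RGB -> YUV420P conversion, encode, write
  writer.endWrite();               // flush delayed frames, write end code, close
}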

/*!
  This method fills the frame bitmap from the vpImage bitmap.
*/
void vpFFMPEG::writeBitmap(vpImage<vpRGBa> &I)
{
  unsigned char* beginInput = (unsigned char*)I.bitmap;
  unsigned char* input = NULL;
  unsigned char* beginOutput = (unsigned char*)pFrameRGB->data[0];
  int widthStep = pFrameRGB->linesize[0];
  
  for(int i=0 ; i < height ; i++)
  {
    input = beginInput + 4 * i * width;
    unsigned char *output = beginOutput + i * widthStep;
    for(int j=0 ; j < width ; j++)
    {
      *(output++) = *(input);
      *(output++) = *(input+1);
      *(output++) = *(input+2);

      input+=4;
    }
  }
}
Example #20
void PrivateDecoderCrystalHD::FillFrame(BC_DTS_PROC_OUT *out)
{
    bool second_field = false;
    if (m_frame)
    {
        if (out->PicInfo.picture_number != m_frame->frameNumber)
        {
            LOG(VB_PLAYBACK, LOG_WARNING, LOC + "Missing second field");
            AddFrameToQueue();
        }
        else
        {
            second_field = true;
        }
    }

    int in_width   = out->PicInfo.width;
    int in_height  = out->PicInfo.height;
    int out_width  = (in_width + 15) & (~0xf);
    int out_height = in_height;
    int size       = ((out_width * (out_height + 1)) * 3) / 2;
    uint8_t* src   = out->Ybuff;

    if (!m_frame)
    {
        unsigned char* buf  = new unsigned char[size];
        m_frame = new VideoFrame();
        init(m_frame, FMT_YV12, buf, out_width, out_height, size);
        m_frame->timecode = (int64_t)out->PicInfo.timeStamp;
        m_frame->frameNumber = out->PicInfo.picture_number;
    }

    if (!m_frame)
        return;

    // line 21 data (608/708 captions)
    // this appears to be unimplemented in the driver
    if (out->UserData && out->UserDataSz)
    {
        int size = out->UserDataSz > 1024 ? 1024 : out->UserDataSz;
        m_frame->priv[0] = new unsigned char[size];
        memcpy(m_frame->priv[0], out->UserData, size);
        m_frame->qstride = size; // don't try this at home
    }

    PixelFormat out_fmt = PIX_FMT_YUV420P;
    PixelFormat in_fmt  = bcmpixfmt_to_pixfmt(m_pix_fmt);
    AVPicture img_in, img_out;
    avpicture_fill(&img_out, (uint8_t *)m_frame->buf, out_fmt,
                   out_width, out_height);
    avpicture_fill(&img_in, src, in_fmt,
                   in_width, in_height);

    if (!(out->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC))
    {
        myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt,
                             in_width, in_height);
        m_frame->interlaced_frame = 0;
        AddFrameToQueue();
    }
    else
    {
        img_out.linesize[0] *= 2;
        img_out.linesize[1] *= 2;
        img_out.linesize[2] *= 2;
        m_frame->top_field_first = out->PicInfo.pulldown == vdecTopBottom;
        int field = out->PoutFlags & BC_POUT_FLAGS_FLD_BOT;
        if (field)
        {
            img_out.data[0] += out_width;
            img_out.data[1] += out_width >> 1;
            img_out.data[2] += out_width >> 1;
        }
        myth_sws_img_convert(&img_out, out_fmt, &img_in,
                             in_fmt, in_width, in_height / 2);
        if (second_field)
            AddFrameToQueue();
    }
}
Example #21
bool VideoDecoder::InitCodec(const uint32_t width, const uint32_t height)
{
  if (codec_initialized_)
  {
    // TODO(mani-monaj): Maybe re-initialize
    return true;
  }

  try
  {
    ThrowOnCondition(width == 0 || height == 0, std::string("Invalid frame size:") +
                     boost::lexical_cast<std::string>(width) + " x " + boost::lexical_cast<std::string>(height));

    // Very first init
    avcodec_register_all();
    av_register_all();
    av_log_set_level(AV_LOG_QUIET);

    codec_ptr_ = avcodec_find_decoder(AV_CODEC_ID_H264);
    ThrowOnCondition(codec_ptr_ == NULL, "Codec H264 not found!");


    codec_ctx_ptr_ = avcodec_alloc_context3(codec_ptr_);
    codec_ctx_ptr_->pix_fmt = AV_PIX_FMT_YUV420P;
    codec_ctx_ptr_->skip_frame = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    codec_ctx_ptr_->skip_loop_filter = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->workaround_bugs = AVMEDIA_TYPE_VIDEO;
    codec_ctx_ptr_->codec_id = AV_CODEC_ID_H264;
    codec_ctx_ptr_->skip_idct = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->width = width;
    codec_ctx_ptr_->height = height;

    ThrowOnCondition(
          avcodec_open2(codec_ctx_ptr_, codec_ptr_, NULL) < 0,
          "Can not open the decoder!");



    const uint32_t num_bytes = avpicture_get_size(PIX_FMT_RGB24, codec_ctx_ptr_->width, codec_ctx_ptr_->height);
    {
       frame_ptr_ = avcodec_alloc_frame();
       frame_rgb_ptr_ = avcodec_alloc_frame();

       ThrowOnCondition(!frame_ptr_ || !frame_rgb_ptr_, "Can not allocate memory for frames!");

       frame_rgb_raw_ptr_ = reinterpret_cast<uint8_t*>(av_malloc(num_bytes * sizeof(uint8_t)));
       ThrowOnCondition(frame_rgb_raw_ptr_ == NULL,
                        std::string("Can not allocate memory for the buffer: ") +
                        boost::lexical_cast<std::string>(num_bytes));
       ThrowOnCondition(0 == avpicture_fill(
                          reinterpret_cast<AVPicture*>(frame_rgb_ptr_), frame_rgb_raw_ptr_, PIX_FMT_RGB24,
                          codec_ctx_ptr_->width, codec_ctx_ptr_->height),
                        "Failed to initialize the picture data structure.");
    }
    av_init_packet(&packet_);
  }
  catch (const std::runtime_error& e)
  {
    ARSAL_PRINT(ARSAL_PRINT_ERROR, LOG_TAG, "%s", e.what());
    Cleanup();
    return false;
  }

  codec_initialized_ = true;
  first_iframe_recv_ = false;
  ARSAL_PRINT(ARSAL_PRINT_INFO, LOG_TAG, "H264 Codec is initialized!");
  return true;
}
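
Once InitCodec() succeeds, decoding one received buffer is a matter of pointing packet_ at it and calling the decoder. A sketch of that step, reusing the members initialized above (the DecodeFrame helper itself is illustrative, not part of the original class):

bool VideoDecoder::DecodeFrame(uint8_t* buf, uint32_t buf_size)
{
  // Hypothetical helper: feed one Annex-B H.264 buffer to the decoder.
  packet_.data = buf;
  packet_.size = buf_size;
  int frame_finished = 0;
  const int len = avcodec_decode_video2(codec_ctx_ptr_, frame_ptr_, &frame_finished, &packet_);
  if (len < 0 || !frame_finished)
    return false;
  // frame_ptr_ now holds a YUV420P picture; an sws_scale() into
  // frame_rgb_ptr_ / frame_rgb_raw_ptr_ would follow here.
  return true;
}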
Example #22
int convert_scale(uint8_t *inbuffer,   int in_width,  int in_height,  char *palette,
                  uint8_t **outbuffer, int out_width, int out_height)
{
  enum AVPixelFormat in_pix_fmt;
  struct SwsContext *sws_ctx;

  //set pixel format (C strings must be compared with strcmp(), not ==)
  if (strcmp(palette, "BGR24") == 0) {
    in_pix_fmt = AV_PIX_FMT_BGR24;
  } else if (strcmp(palette, "BGR32") == 0) {
    in_pix_fmt = AV_PIX_FMT_BGR32;
  } else if (strcmp(palette, "RGB24") == 0) {
    in_pix_fmt = AV_PIX_FMT_RGB24;
  } else if (strcmp(palette, "RGB32") == 0) {
    in_pix_fmt = AV_PIX_FMT_RGB32;
  } else if (strcmp(palette, "YUYV") == 0) {
    in_pix_fmt = AV_PIX_FMT_YUYV422;
  } else if (strcmp(palette, "YUV420") == 0) {
    in_pix_fmt = AV_PIX_FMT_YUV420P;
  } else if (strcmp(palette, "GREY") == 0) {
    in_pix_fmt = AV_PIX_FMT_GRAY8;
  } else {
    in_pix_fmt = AV_PIX_FMT_BGR24;  // default!
  }

  //calculate the bytes needed for the output image
  //*outsize = avpicture_get_size(out_pix_fmt, out_width, out_height);

  //create buffer for the output image
  //*outbuffer = (uint8_t*)av_malloc(*outsize);

  //create ffmpeg frame structures.  These do not allocate space for image data,
  //just the pointers and other information about the image.
  AVFrame *inpic  = avcodec_alloc_frame();
  AVFrame *outpic = avcodec_alloc_frame();

  //this will set the pointers in the frame structures to the right points in
  //the input and output buffers.
  avpicture_fill((AVPicture*)inpic,  inbuffer,   in_pix_fmt,  in_width,  in_height);
  avpicture_fill((AVPicture*)outpic, *outbuffer, AV_PIX_FMT_GRAY8, out_width, out_height);

  //create the conversion context
  sws_ctx = sws_getContext(in_width,  in_height,  in_pix_fmt,
                           out_width, out_height, AV_PIX_FMT_GRAY8,
                           SWS_FAST_BILINEAR, NULL, NULL, NULL);

  /* perform the conversion */
  int ret = sws_scale(sws_ctx, inpic->data,  inpic->linesize, 0, in_height,
                               outpic->data, outpic->linesize);
  //printf("ret = %d\n", ret);
  //save_buffer_to_file(outpic->data[0], *outsize, "./gray.raw");
  //save_buffer_to_file(outbuffer, *outsize, "./gray.raw");

  //av_free(outbuffer);
  //av_free(inpic);
  avcodec_free_frame(&inpic);
  //av_free(outpic);
  avcodec_free_frame(&outpic);
  sws_freeContext(sws_ctx);

  return ret;
}
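
A usage sketch for convert_scale(): scaling a 640x480 YUYV capture down to 320x240 grayscale. The dimensions and the capture_buf source are assumptions; the caller owns both buffers:

// Hypothetical call site; capture_buf is assumed to hold one YUYV frame.
uint8_t *gray = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_GRAY8, 320, 240));
int out_rows = convert_scale(capture_buf, 640, 480, (char *)"YUYV", &gray, 320, 240);
if (out_rows != 240)   // sws_scale() returns the height of the output slice
  fprintf(stderr, "conversion failed\n");
av_free(gray);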
Example #23
/* Init video source 
 * file: path to open
 * width: destination frame width in pixels - use 0 for source
 * height: destination frame height in pixels - use 0 for source
 * format: PIX_FMT_GRAY8 or PIX_FMT_RGB24
 * Returns video context on success, NULL otherwise
 */
video *video_init(char *file, int width, int height, int format)
{
    int i = 0;
	
    video *ret = (video*)malloc(sizeof(video));
    memset(ret, 0, sizeof(video));
    ret->format = format;
	
    /* Init ffmpeg */
    av_register_all();
	
    /* Open file, check usability */
    if(av_open_input_file(&ret->pFormatCtx, file, NULL, 0, NULL) ||
       av_find_stream_info(ret->pFormatCtx) < 0)
	return video_quit(ret);
	
    /* Find the first video stream */
    ret->videoStream = -1;
    for(i = 0; i < ret->pFormatCtx->nb_streams; i++)
	if(ret->pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
	    ret->videoStream = i;
	    break;
	}
	
    if(ret->videoStream == -1)
	return video_quit(ret);
	
    /* Get context for codec, pin down target width/height, find codec */
    ret->pCtx = ret->pFormatCtx->streams[ret->videoStream]->codec;
    ret->width = width? width: ret->pCtx->width;
    ret->height = height? height: ret->pCtx->height;
    ret->pCodec = avcodec_find_decoder(ret->pCtx->codec_id);
	
    if(!ret->pCodec ||
       avcodec_open(ret->pCtx, ret->pCodec) < 0)
	return video_quit(ret);
	
    /* Frame rate fix for some codecs */
    if(ret->pCtx->time_base.num > 1000 && ret->pCtx->time_base.den == 1)
	ret->pCtx->time_base.den = 1000;
	
    /* Get framebuffers */
    ret->pRaw = avcodec_alloc_frame();
    ret->pDat = avcodec_alloc_frame();
	
    if(!ret->pRaw || !ret->pDat)
	return video_quit(ret);
	
    /* Create data buffer, sized for the (possibly scaled) output frame */
    ret->buffer = (uint8_t*)malloc(avpicture_get_size(ret->format, 
					    ret->width, ret->height));
	
    /* Init buffers */
    avpicture_fill((AVPicture *) ret->pDat, ret->buffer, ret->format, 
		   ret->width, ret->height);
	
    /* Init scale & convert */
    ret->Sctx = sws_getContext(ret->pCtx->width, ret->pCtx->height, ret->pCtx->pix_fmt,
			       ret->width, ret->height, (PixelFormat)ret->format, SWS_BICUBIC, NULL, NULL, NULL);
	
    if(!ret->Sctx)
	return video_quit(ret);
	
    /* Give some info on stderr about the file & stream */
    //dump_format(ret->pFormatCtx, 0, file, 0);
	
    return ret;
}
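
A usage sketch matching the doc comment above; passing 0 for width and height keeps the source dimensions, and a NULL return signals failure (video_quit() also doubles as the cleanup routine):

// Hypothetical call site for video_init().
video *v = video_init((char *)"clip.avi", 0, 0, PIX_FMT_GRAY8);
if (!v) {
    fprintf(stderr, "cannot open clip.avi\n");
    return 1;
}
/* ... pull and process frames with the rest of this API ... */
video_quit(v);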
Example #24
void djvFFmpegSave::write(const djvImage & in, const djvImageIoFrameInfo & frame)
    throw (djvError)
{
    //DJV_DEBUG("djvFFmpegSave::write");
    //DJV_DEBUG_PRINT("in = " << in);
    //DJV_DEBUG_PRINT("frame = " << frame);

    // Convert the image if necessary.

    const djvPixelData * p = &in;

    if (in.info() != _info)
    {
        //DJV_DEBUG_PRINT("convert = " << _image);

        _image.zero();

        djvOpenGlImage::copy(in, _image);

        p = &_image;
    }
    
    // Encode the image.

    avpicture_fill(
        (AVPicture *)_avFrameRgb,
        p->data(),
        _avFrameRgbPixel,
        p->w(),
        p->h());
    
    quint64 pixelByteCount = p->pixelByteCount();
    quint64 scanlineByteCount = p->scanlineByteCount();
    quint64 dataByteCount = p->dataByteCount();
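    // The data/lineSize arrays below point each plane at the last scanline
    // and use a negative stride, so sws_scale() walks the RGB source
    // bottom-up and writes a vertically flipped result into _avFrame.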
    
    const uint8_t * const data [] =
    {
        p->data() + dataByteCount - scanlineByteCount,
        p->data() + dataByteCount - scanlineByteCount,
        p->data() + dataByteCount - scanlineByteCount,
        p->data() + dataByteCount - scanlineByteCount
    };
    
    const int lineSize [] =
    {
        -static_cast<int>(scanlineByteCount),
        -static_cast<int>(scanlineByteCount),
        -static_cast<int>(scanlineByteCount),
        -static_cast<int>(scanlineByteCount)
    };
    
    sws_scale(
        _swsContext,
        //(uint8_t const * const *)_avFrameRgb->data,
        //_avFrameRgb->linesize,
        data,
        lineSize,
        0,
        p->h(),
        _avFrame->data,
        _avFrame->linesize);
    
    AVCodecContext * avCodecContext = _avStream->codec;

    djvFFmpeg::Packet packet;
    packet().data = 0;
    packet().size = 0;
    
    _avFrame->pts     = _frame++;
    _avFrame->quality = avCodecContext->global_quality;
    
    int finished = 0;
    
    int r = avcodec_encode_video2(
        avCodecContext,
        &packet(),
        _avFrame,
        &finished);
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }

    //DJV_DEBUG_PRINT("finished = " << finished);
    //DJV_DEBUG_PRINT("size = " << packet().size);
    //DJV_DEBUG_PRINT("pts = " << static_cast<qint64>(packet().pts));
    //DJV_DEBUG_PRINT("dts = " << static_cast<qint64>(packet().dts));
    //DJV_DEBUG_PRINT("duration = " << static_cast<qint64>(packet().duration));

    // Write the image.
    
    packet().pts = av_rescale_q(
        packet().pts,
        avCodecContext->time_base,
        _avStream->time_base);
    packet().dts = av_rescale_q(
        packet().dts,
        avCodecContext->time_base,
        _avStream->time_base);
    packet().duration = av_rescale_q(
        packet().duration,
        avCodecContext->time_base,
        _avStream->time_base);
    packet().stream_index = _avStream->index;

    r = av_interleaved_write_frame(_avFormatContext, &packet());
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }
}
Example #25
std::unique_ptr<ImgBuf>
VideoConverterFfmpeg::convert(const ImgBuf& src)
{
    std::unique_ptr<ImgBuf> ret;    
    
    const int width = src.width;
    const int height = src.height;

    PixelFormat dst_pixFmt = fourcc_to_ffmpeg(_dst_fmt);
    assert(dst_pixFmt != PIX_FMT_NONE);
    PixelFormat src_pixFmt = PIX_FMT_RGB24;
    
#ifdef HAVE_SWSCALE_H

    if (!_swsContext.get()) {

        _swsContext.reset(new SwsContextWrapper(sws_getContext(width, height,
            src_pixFmt, width, height, dst_pixFmt, SWS_BILINEAR, nullptr, nullptr,
            nullptr)));

        if (!_swsContext->getContext()) {

            // This means we will try to assign the 
            // context again next time.
            _swsContext.reset();
            
            return ret;
        }
    }
#endif


    AVPicture srcpicture = {{src.data, nullptr, nullptr, nullptr},
        {static_cast<int>(src.stride[0]), 0, 0, 0}};
    
    
    int bufsize = avpicture_get_size(dst_pixFmt, width, height);
    if (bufsize == -1) {
        return ret;
    }

    std::uint8_t* dstbuffer = new std::uint8_t[bufsize];

    AVPicture dstpicture;
    avpicture_fill(&dstpicture, dstbuffer, dst_pixFmt, width, height);
    
 
#ifndef HAVE_SWSCALE_H
    img_convert(&dstpicture, dst_pixFmt, &srcpicture, src_pixFmt, width,
                height);
#else

    int rv = sws_scale(_swsContext->getContext(), srcpicture.data,
                       srcpicture.linesize, 0, height, dstpicture.data,
                       dstpicture.linesize);

    if (rv == -1) {
        delete [] dstbuffer;
        return ret;
    }
#endif    
    ret.reset(new ImgBuf(_dst_fmt, dstbuffer, bufsize, src.width,
                         src.height));
    std::copy(dstpicture.linesize, dstpicture.linesize+4, ret->stride.begin()); 
 
    return ret;
}
Example #26
void djvFFmpegSave::open(const djvFileInfo & fileInfo, const djvImageIoInfo & info)
    throw (djvError)
{
    //DJV_DEBUG("djvFFmpegSave::open");
    //DJV_DEBUG_PRINT("fileInfo = " << fileInfo);
    //DJV_DEBUG_PRINT("info = " << info);
    
    close();
    
    _frame = 0;
        
    // Open the file.
    
    djvPixel::PIXEL pixel         = static_cast<djvPixel::PIXEL>(0);
    bool            bgr           = false;
    
    QString         avFormatName;
    AVCodecID       avCodecId     = static_cast<AVCodecID>(0);
    AVPixelFormat   avPixel       = static_cast<AVPixelFormat>(0);
    double          avQScale      = -1.0;
    
    _avFrameRgbPixel = static_cast<AVPixelFormat>(0);

    djvFFmpeg::Dictionary dictionary;
    QString               value;

    switch (_options.format)
    {
        /*case djvFFmpeg::H264:

            pixel            = djvPixel::RGBA_U8;
        
            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_H264;
            
            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    value = "fast";   break;
                case djvFFmpeg::MEDIUM: value = "medium"; break;
                case djvFFmpeg::HIGH:   value = "slow";   break;

                default: break;
            }

            av_dict_set(
                dictionary(),
                "preset",
                value.toLatin1().data(),
                0);

            break;*/
        
        case djvFFmpeg::MPEG4:

            pixel            = djvPixel::RGBA_U8;
            bgr              = info.bgr;

            avFormatName     = "mp4";
            avCodecId        = AV_CODEC_ID_MPEG4;
            avPixel          = AV_PIX_FMT_YUV420P;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGRA : AV_PIX_FMT_RGBA;

            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    avQScale = 9.0; break;
                case djvFFmpeg::MEDIUM: avQScale = 3.0; break;
                case djvFFmpeg::HIGH:   avQScale = 1.0; break;

                default: break;
            }

            break;
        
        case djvFFmpeg::PRO_RES:

            pixel            = djvPixel::RGB_U16;
            bgr              = info.bgr;

            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_PRORES;
            avPixel          = AV_PIX_FMT_YUV422P10;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGR48 : AV_PIX_FMT_RGB48;
         
            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    value = "1"; break;
                case djvFFmpeg::MEDIUM: value = "2"; break;
                case djvFFmpeg::HIGH:   value = "3"; break;

                default: break;
            }

            av_dict_set(
                dictionary(),
                "profile",
                value.toLatin1().data(),
                0);

            break;
        
        case djvFFmpeg::MJPEG:

            pixel            = djvPixel::RGBA_U8;
            bgr              = info.bgr;

            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_MJPEG;
            avPixel          = AV_PIX_FMT_YUVJ422P;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGRA : AV_PIX_FMT_RGBA;

            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    avQScale = 9.0; break;
                case djvFFmpeg::MEDIUM: avQScale = 3.0; break;
                case djvFFmpeg::HIGH:   avQScale = 1.0; break;

                default: break;
            }

            break;
        
        default: break;
    }
    
    //DJV_DEBUG_PRINT("pixel = " << pixel);

    //DJV_DEBUGBUG_PRINT("av format name = " << avFormatName);
    //DJV_DEBUGBUG_PRINT("av codec id = " << avCodecId);
    //DJV_DEBUGBUG_PRINT("av pixel = " << avPixel);
    //DJV_DEBUGBUG_PRINT("av rgb pixel = " << _avFrameRgbPixel);
    //DJV_DEBUGBUG_PRINT("av qscale = " << avQScale);
    
    AVOutputFormat * avFormat = av_guess_format(
        avFormatName.toLatin1().data(),
        0, //fileInfo.fileName().toLatin1().data(),
        0);
    
    if (! avFormat)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot find format: %1").
                arg(djvFFmpeg::formatLabels()[_options.format]));
    }
    
    //DJV_DEBUGBUG_PRINT("av format extensions = " << avFormat->extensions);
    
    _avFormatContext = avformat_alloc_context();
    _avFormatContext->oformat = avFormat;

    AVCodec * avCodec = avcodec_find_encoder(avCodecId);

    if (! avCodec)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot find encoder: %1").
                arg(djvFFmpeg::formatLabels()[_options.format]));
    }

    AVCodecContext * avCodecContext = avcodec_alloc_context3(avCodec);
    
    avcodec_get_context_defaults3(avCodecContext, avCodec);
    
    //DJV_DEBUGBUG_PRINT("default bit rate = " << avCodecContext->bit_rate);
    //DJV_DEBUGBUG_PRINT("default gop = " << avCodecContext->gop_size);
    
    avCodecContext->pix_fmt       = avPixel;
    avCodecContext->width         = info.size.x;
    avCodecContext->height        = info.size.y;
    avCodecContext->time_base.den = info.sequence.speed.scale();
    avCodecContext->time_base.num = info.sequence.speed.duration();
    
    if (avFormat->flags & AVFMT_GLOBALHEADER)
        avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (avQScale >= 0.0)
    {
        avCodecContext->flags |= CODEC_FLAG_QSCALE;
        avCodecContext->global_quality = FF_QP2LAMBDA * avQScale;
    }
    
    int r = avcodec_open2(avCodecContext, avCodec, dictionary());
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }

    _avStream = avformat_new_stream(_avFormatContext, avCodecContext->codec);

    if (! _avStream)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot create stream"));
    }
    
    _avStream->codec         = avCodecContext;
    _avStream->time_base.den = info.sequence.speed.scale();
    _avStream->time_base.num = info.sequence.speed.duration();
    
    r = avio_open2(
        &_avIoContext,
        fileInfo.fileName().toLatin1().data(),
        AVIO_FLAG_READ_WRITE,
        0,
        0);
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }
    
    _avFormatContext->pb = _avIoContext;

    r = avformat_write_header(_avFormatContext, 0);
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }
    
    _info          = djvPixelDataInfo();
    _info.fileName = fileInfo;
    _info.size     = info.size;
    _info.pixel    = pixel;
    _info.bgr      = info.bgr;

    // Initialize the buffers.
    
    _image.set(_info);
    
    _avFrame         = av_frame_alloc();
    _avFrame->width  = info.size.x;
    _avFrame->height = info.size.y;
    _avFrame->format = avCodecContext->pix_fmt;

    _avFrameBuf = (uint8_t *)av_malloc(
        avpicture_get_size(
            avCodecContext->pix_fmt,
            avCodecContext->width,
            avCodecContext->height));

    avpicture_fill(
        (AVPicture *)_avFrame,
        _avFrameBuf,
        avCodecContext->pix_fmt,
        avCodecContext->width,
        avCodecContext->height);

    _avFrameRgb = av_frame_alloc();
    
    // Initialize the software scaler.

    _swsContext = sws_getContext(
        info.size.x,
        info.size.y,
        _avFrameRgbPixel,
        avCodecContext->width,
        avCodecContext->height,
        avCodecContext->pix_fmt,
        SWS_BILINEAR,
        0,
        0,
        0);

    if (! _swsContext)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot create software scaler"));
    }
}
Example #27
int RemoteCameraRtsp::Capture( Image &image )
{
    AVPacket packet;
    uint8_t* directbuffer;
    int frameComplete = false;

    /* Request a writeable buffer of the target image */
    directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
    if(directbuffer == NULL) {
        Error("Failed requesting writeable buffer for the captured image.");
        return (-1);
    }
	
    while ( true )
    {
        buffer.clear();
        if ( !rtspThread->isRunning() )
            return (-1);

        if ( rtspThread->getFrame( buffer ) )
        {
            Debug( 3, "Read frame %d bytes", buffer.size() );
            Debug( 4, "Address %p", buffer.head() );
            Hexdump( 4, buffer.head(), 16 );

            if ( !buffer.size() )
                return( -1 );

            if(mCodecContext->codec_id == AV_CODEC_ID_H264)
            {
                // SPS and PPS frames should be saved and appended to IDR frames
                int nalType = (buffer.head()[3] & 0x1f);
                
                // SPS
                if(nalType == 7)
                {
                    lastSps = buffer;
                    continue;
                }
                // PPS
                else if(nalType == 8)
                {
                    lastPps = buffer;
                    continue;
                }
                // IDR
                else if(nalType == 5)
                {
                    buffer += lastSps;
                    buffer += lastPps;
                }
            }

            av_init_packet( &packet );
            
	    while ( !frameComplete && buffer.size() > 0 )
	    {
		packet.data = buffer.head();
		packet.size = buffer.size();
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
		int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
		int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
		if ( len < 0 )
		{
			Error( "Error while decoding frame %d", frameCount );
			Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
			buffer.clear();
			continue;
		}
		Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
		//if ( buffer.size() < 400 )
		   //Hexdump( 0, buffer.head(), buffer.size() );
		   
		buffer -= len;

	    }
            if ( frameComplete ) {
	       
		Debug( 3, "Got frame %d", frameCount );
			    
		avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
			
#if HAVE_LIBSWSCALE
		if(mConvertContext == NULL) {
			if(config.cpu_extensions && sseversion >= 20) {
				mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
			} else {
				mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
			}
			if(mConvertContext == NULL)
				Fatal( "Unable to create conversion context");
		}
	
		if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
			Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
		Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
	
		frameCount++;

	     } /* frame complete */
	     
	     av_free_packet( &packet );
	} /* getFrame() */
 
	if(frameComplete)
		return (0);
	
    }
    return (0) ;
}
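
The NAL handling above relies on the H.264 Annex-B layout: after a 00 00 01 start code, the low five bits of the next byte give nal_unit_type (7 = SPS, 8 = PPS, 5 = IDR slice). A standalone sketch of that classification, assuming the same 3-byte start code the code above expects:

// Sketch: classify one Annex-B NAL unit (3-byte start code assumed).
static int nal_unit_type(const uint8_t *nal)
{
    return nal[3] & 0x1f;   // low 5 bits of the byte after 00 00 01
}
// nal_unit_type(p) == 7 -> SPS, 8 -> PPS, 5 -> IDR slice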
Example #28
HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame)
{
    void *frameBytes;
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;

    frameCount++;

    // Handle Video Frame
    if (videoFrame)
    {
        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource)
            fprintf(stderr, "Frame received (#%lu) - No input signal detected - Frames dropped %u - Total dropped %u\n", frameCount, ++dropped, ++totaldropped);

        {
            AVPacket pkt;
            AVCodecContext *c;
            av_init_packet(&pkt);
            c = video_st->codec;
            if (g_verbose && frameCount % 25 == 0)
            {
                unsigned long long qsize = avpacket_queue_size(&queue);
                fprintf(stderr, "Frame received (#%lu) - Valid (%liB) - QSize %f\n", frameCount, videoFrame->GetRowBytes() * videoFrame->GetHeight(), (double)qsize/1024/1024);
            }
            videoFrame->GetBytes(&frameBytes);
            avpicture_fill((AVPicture*)picture, (uint8_t *)frameBytes,
                           PIX_FMT_UYVY422,
                           videoFrame->GetWidth(), videoFrame->GetHeight());
            videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                      video_st->time_base.den);
            pkt.pts = pkt.dts = frameTime/video_st->time_base.num;
            pkt.duration = frameDuration;
            //To be made sure it still applies
            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = video_st->index;
            pkt.data = (uint8_t *)frameBytes;
            pkt.size = videoFrame->GetRowBytes() * videoFrame->GetHeight();
            //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
            c->frame_number++;
//            av_interleaved_write_frame(oc, &pkt);
            avpacket_queue_put(&queue, &pkt);

            //write(videoOutputFile, frameBytes, videoFrame->GetRowBytes() * videoFrame->GetHeight());
        }
//        frameCount++;

        if (g_maxFrames > 0 && frameCount >= g_maxFrames ||
            avpacket_queue_size(&queue) > g_memoryLimit)
        {
            pthread_cond_signal(&sleepCond);
        }
    }

    // Handle Audio Frame
    if (audioFrame)
    {
        AVCodecContext *c;
        AVPacket pkt;
        BMDTimeValue audio_pts;
        av_init_packet(&pkt);

        c = audio_st->codec;
        //hack among hacks
        pkt.size = audioFrame->GetSampleFrameCount() *
                g_audioChannels * (g_audioSampleDepth / 8);
        audioFrame->GetBytes(&audioFrameBytes);
        audioFrame->GetPacketTime(&audio_pts, audio_st->time_base.den);
        pkt.dts = pkt.pts = audio_pts/audio_st->time_base.num;
        //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = audio_st->index;
        pkt.data = (uint8_t *)audioFrameBytes;
        //pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
        c->frame_number++;
        //write(audioOutputFile, audioFrameBytes, audioFrame->GetSampleFrameCount() * g_audioChannels * (g_audioSampleDepth / 8));
/*            if (av_interleaved_write_frame(oc, &pkt) != 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        } */
        avpacket_queue_put(&queue, &pkt);
    }
    return S_OK;
}
Example #29
int FfmpegCamera::Capture( Image &image )
{
    if (!mCanCapture){
        return -1;
    }

	AVPacket packet;
	uint8_t* directbuffer;
   
	/* Request a writeable buffer of the target image */
	directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
	if(directbuffer == NULL) {
		Error("Failed requesting writeable buffer for the captured image.");
		return (-1);
	}
    
    int frameComplete = false;
    while ( !frameComplete )
    {
        int avResult = av_read_frame( mFormatContext, &packet );
        if ( avResult < 0 )
        {
            if (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached))
            {
                Info( "av_read_frame returned EOF. Reopening stream." );
                ReopenFfmpeg();
            } else if (avResult == -110) {
                Info( "av_read_frame returned \"%s\". Reopening stream.", strerror(avResult)) ;
                ReopenFfmpeg();
            } else {
                Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, strerror(avResult) );
            }
            return( -1 );
        }
        Debug( 5, "Got packet from stream %d", packet.stream_index );
        if ( packet.stream_index == mVideoStreamId )
        {
            if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
                Fatal( "Unable to decode frame at frame %d", frameCount );

            Debug( 4, "Decoded video packet at frame %d", frameCount );

            if ( frameComplete )
            {
                Debug( 3, "Got frame %d", frameCount );

                avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );

#if HAVE_LIBSWSCALE
                if ( mConvertContext == NULL ) {
                    if ( config.cpu_extensions && sseversion >= 20 ) {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
                    } else {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
                    }
                    if ( mConvertContext == NULL )
                        Fatal( "Unable to create conversion context for %s", mPath.c_str() );
                }

                if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
                    Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
                Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
 
                frameCount++;
            }
        }
        av_free_packet( &packet );
    }
    return (0);
}
Example #30
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer= NULL;
  int ret, got_frame;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  //  avcodec_register_all();
  av_register_all();
  

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"파일을 열 수 없습니다.\n");
    return -1;
  }

  
    
  // Retrieve stream information
  if((ret = avformat_find_stream_info(pFormatCtx,NULL)) < 0 ) {
    av_log(NULL, AV_LOG_ERROR, "Cannot find stream information.\n");
    return ret; // Couldn't find stream information
  }
  
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream = av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,&pCodec,0);

  
  if(videoStream < 0 ) {
    av_log(NULL,AV_LOG_ERROR,"Cannot find a video stream in the input file\n");
    return videoStream;
  }

  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  /* pCodec=avcodec_find_decoder(pCodecCtx->codec_id); */
  /* if(pCodec==NULL) { */
  /*   av_log(NULL, AV_LOG_ERROR,"Unsupported codec.\n"); */
  /*   return -1; // Codec not found */
  /* } */
  // Open codec


  if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
  
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;

  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                              pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                 pCodecCtx->width, pCodecCtx->height);


  av_init_packet(&packet);
  packet.data = NULL;
  packet.size = 0;

  // Read frames and save the first 100 frames to disk
  i=0;

  
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
        // Copy the decoded picture into pFrameRGB; note that av_picture_crop
        // does not convert the pixel format, it only crops/copies the planes
        av_picture_crop((AVPicture *)pFrameRGB, (AVPicture*)pFrame,
                        PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

        // Save the frame to disk
        if(++i<=100)
          SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
        if( i >100 )
          break;
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    
  }
  
  // Free the RGB image
  av_free(buffer);
  
  printf(" av_free ");
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}