void convert_image(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr) {
	AVCodecContext *codecCtx = NULL;
	AVCodec *codec;
	AVFrame *pFrameRGB = NULL;
	struct SwsContext *scalerCtx = NULL;
	int ret = -1;

	/* everything the fail: path touches is initialized up front, so an
	   early goto fail is safe */
	*got_packet_ptr = 0;

	codec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
	if (!codec) {
	    printf("avcodec_find_decoder() failed to find decoder\n");
		goto fail;
	}

    codecCtx = avcodec_alloc_context3(codec);
	if (!codecCtx) {
		printf("avcodec_alloc_context3 failed\n");
		goto fail;
	}

	codecCtx->bit_rate = pCodecCtx->bit_rate;
	codecCtx->width = pCodecCtx->width;
	codecCtx->height = pCodecCtx->height;
	codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
	codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	codecCtx->time_base.num = pCodecCtx->time_base.num;
	codecCtx->time_base.den = pCodecCtx->time_base.den;

	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		printf("avcodec_open2() failed\n");
		goto fail;
	}

	pFrameRGB = avcodec_alloc_frame();
	
	if (!pFrameRGB) {
		goto fail;
	}

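    /* allocate the pixel buffer for the target frame; avpicture_alloc() also
       fills in its data[] and linesize[] fields */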
    avpicture_alloc((AVPicture *) pFrameRGB,
    		TARGET_IMAGE_FORMAT,
    		codecCtx->width,
    		codecCtx->height);
    
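	/* the scaler only converts the pixel format here - source and target
	   dimensions are identical */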
	scalerCtx = sws_getContext(pCodecCtx->width,
			pCodecCtx->height, 
			pCodecCtx->pix_fmt, 
			pCodecCtx->width, 
			pCodecCtx->height, 
			TARGET_IMAGE_FORMAT, 
	        SWS_FAST_BILINEAR, 0, 0, 0);
	
	if (!scalerCtx) {
		printf("sws_getContext() failed\n");
		goto fail;
	}
    
    sws_scale(scalerCtx,
    		(const uint8_t * const *) pFrame->data,
    		pFrame->linesize,
    		0,
    		pFrame->height,
    		pFrameRGB->data,
    		pFrameRGB->linesize);
	
	ret = avcodec_encode_video2(codecCtx, avpkt, pFrameRGB, got_packet_ptr);
	
	if (ret < 0) {
		*got_packet_ptr = 0;
	}
	
	fail:
	if (pFrameRGB) {
		avpicture_free((AVPicture *) pFrameRGB);
		av_free(pFrameRGB);
	}
	
	if (codecCtx) {
		avcodec_close(codecCtx);
	    av_free(codecCtx);
	}
	
	if (scalerCtx) {
		sws_freeContext(scalerCtx);
	}
	
	if (ret < 0 || !*got_packet_ptr) {
		av_free_packet(avpkt);
	}
}
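
A minimal caller sketch (hypothetical, not part of the original source; assumes pCodecCtx and pFrame come from a decoded video stream and outFile is an open FILE*):

	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.data = NULL; // let avcodec_encode_video2() allocate the payload
	pkt.size = 0;

	int got_packet = 0;
	convert_image(pCodecCtx, pFrame, &pkt, &got_packet);
	if (got_packet) {
		fwrite(pkt.data, 1, pkt.size, outFile); // write the encoded image
		av_free_packet(&pkt); // convert_image() already frees it on failure
	}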
Example #2
/*! \brief This function applies deinterlacing (only if needed) and color
	conversion to the video frame in fRawDecodedPicture.

	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
	converted yet (otherwise the behavior of this function is undefined).

	You should only call this function when you got a new picture decoded by
	the video decoder and the fHeader variable was updated accordingly (\see
	_UpdateMediaHeaderForVideoFrame()).

	When this function finishes, the postprocessed video frame will be
	available in fPostProcessedDecodedPicture and fDecodedData
	(fDecodedDataSizeInBytes will be set accordingly).
*/
void
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fHeader.u.raw_video.display_line_width;
	int displayHeight = fHeader.u.raw_video.display_line_count;
	AVPicture deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	if (fRawDecodedPicture->interlaced_frame) {
		AVPicture rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		avpicture_alloc(&deinterlacedPicture, fContext->pix_fmt, displayWidth,
			displayHeight);

		if (avpicture_deinterlace(&deinterlacedPicture, &rawPicture,
				fContext->pix_fmt, displayWidth, displayHeight) < 0) {
			TRACE("[v] avpicture_deinterlace() - error\n");
		} else
			useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	fDecodedDataSizeInBytes = avpicture_get_size(
		colorspace_to_pixfmt(fOutputColorSpace), displayWidth, displayHeight);

	if (fDecodedData == NULL)
		fDecodedData
			= static_cast<uint8_t*>(malloc(fDecodedDataSizeInBytes));

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= fHeader.u.raw_video.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0]
				= deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1]
				= deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2]
				= deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3]
				= deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	if (fRawDecodedPicture->interlaced_frame)
		avpicture_free(&deinterlacedPicture);
}
Example #3
static void vo_x11_zoom (int multiple)
{
    unsigned long xswamask, event_mask;
    XSetWindowAttributes xswa;
    XSizeHints hint;
    XVisualInfo xvinfo;

    if (dlpctxp->pwidth * multiple > screen_width || dlpctxp->pheight * multiple > screen_height)
    {
        av_log (NULL, AV_LOG_INFO, "vo x11: zoom %d would exceed the fullscreen size\n", multiple);
        return;
    }

    vo_lock ();

    if (Xvowin != None)
    {
        XClearWindow (Xdisplay, Xvowin);
        XUnmapWindow (Xdisplay, Xvowin);
        XDestroyWindow (Xdisplay, Xvowin);
        Xvowin = None;
    }
    if (Ximg)
    {
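        /* the pixel buffer is owned by us; detach it so XDestroyImage()
           frees only the XImage structure */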
        Ximg->data = NULL;
        XDestroyImage (Ximg);
        Ximg = NULL;
    }

    avpicture_free (my_pic);
    av_free (my_pic);

    xswamask = CWBackingStore | CWBorderPixel;

    dw = dlpctxp->pwidth * multiple;
    dh = dlpctxp->pheight * multiple;

    dx = (screen_width - dw) / 2;
    dy = (screen_height - dh) / 2;

    Xdepth = XDefaultDepth (Xdisplay, 0);
    if (!XMatchVisualInfo (Xdisplay, Xscreen, Xdepth, DirectColor, &xvinfo))
        XMatchVisualInfo (Xdisplay, Xscreen, Xdepth, TrueColor, &xvinfo);

    xswa.background_pixel = 0;
    xswa.border_pixel = 0;
    xswa.backing_store = Always;
    xswa.bit_gravity = StaticGravity;

    Xvowin = XCreateWindow (Xdisplay, Xrootwin, dx, dy, dw, dh, 0, Xdepth, CopyFromParent, CopyFromParent, xswamask, &xswa);

    hint.x = dx;
    hint.y = dy;
    hint.width = dw;
    hint.height = dh;
    hint.flags = PPosition | PSize;
    XSetStandardProperties (Xdisplay, Xvowin, Xtitle, Xtitle, None, NULL, 0, &hint);

    XMapWindow (Xdisplay, Xvowin);
    XClearWindow (Xdisplay, Xvowin);

    event_mask = StructureNotifyMask | KeyPressMask | ExposureMask;
    XSelectInput (Xdisplay, Xvowin, event_mask);
    XSync (Xdisplay, False);

    Ximg = XCreateImage (Xdisplay, xvinfo.visual, Xdepth, ZPixmap, 0, NULL, dw, dh, 8, 0);

    if (dw < 600)
        Xfont = Xfont_120;
    else
        Xfont = Xfont_240;

    if (Xfont)
        XSetFont (Xdisplay, Xvogc, Xfont->fid);

    my_pic = av_mallocz (sizeof (AVPicture));
    avpicture_alloc (my_pic, my_pic_fmt, dw, dh);

    vo_unlock ();
}
Example #4
//--------------------------------------------------------------------
// If a 24-bit video format is found it is the preferred one; if not, the first
// format returned by unicap that we can convert is selected.
//
// Then it tries to set the desired width and height; if that fails, it tries
// to find the nearest supported size or falls back to the default width and
// height of the format.
//
// On V4L devices the 24-bit format is always BGR, so it needs conversion.
// On some V4L devices using a non-default width/height the driver reports BGR
// but returns RGB. Color conversion is done with ffmpeg (swscale).
void ofUCUtils::set_format(int w, int h) {
	if(!deviceReady)
		return;
	d_width=w;
	d_height=h;
	unicap_format_t formats[MAX_FORMATS];
	int format_count;
	unicap_status_t status = STATUS_SUCCESS;
	int rgb24 = -1;

	ofLog(OF_NOTICE,"ofUCUtils : Available formats for this device:");
	for (format_count = 0; SUCCESS (status) && (format_count < MAX_FORMATS); format_count++) {
		status = unicap_enumerate_formats (handle, NULL, &formats[format_count], format_count);
		if (SUCCESS (status)) {
			if (formats[format_count].bpp == 24) {
				rgb24 = format_count;
			}
			ofLog(OF_NOTICE,
					"ofUCUtils : %d: %s, min size: %dx%d, max size:%dx%d, default size: %dx%d",
					format_count, formats[format_count].identifier,
					formats[format_count].min_size.width,
					formats[format_count].min_size.height,
					formats[format_count].max_size.width,
					formats[format_count].max_size.height,
					formats[format_count].size.width,
					formats[format_count].size.height);
			ofLog(OF_VERBOSE,"ofUCUtils: available sizes for this format:");
			for(int i=0; i<formats[format_count].size_count;i++){
				ofLog(OF_VERBOSE,"          %dx%d",formats[format_count].sizes[i].width,formats[format_count].sizes[i].height);
			}
		}
	}

	if (format_count > 0) {
		int selected_format = 0;
		if (rgb24 != -1)
			selected_format = rgb24;
		else{
			for(selected_format=0;selected_format<format_count;selected_format++){
				if(fourcc_to_pix_fmt(formats[selected_format].fourcc)!=-1)
					break;
			}
			if(selected_format==format_count)
				selected_format=0; // no convertible format found, fall back to the first one
		}
		format = formats[selected_format];

		bool exactMatch  = false;
		int sizeDiff = 99999999;
		int mostAproxSize = -1;

		for(int i=0; i<format.size_count;i++){
			if(format.sizes[i].width == w && format.sizes[i].height==h){
				exactMatch=true;
				format.size.width  = format.sizes[i].width;
				format.size.height = format.sizes[i].height;
				break;
			}else{
				if(abs(format.sizes[i].width-w)+abs(format.sizes[i].height-h)<sizeDiff){
					sizeDiff=abs(format.sizes[i].width-w)+abs(format.sizes[i].height-h);
					mostAproxSize=i;
				}
			}
		}
		if(!exactMatch && mostAproxSize!=-1){
			format.size.width  = format.sizes[mostAproxSize].width;
			format.size.height = format.sizes[mostAproxSize].height;

			ofLog(OF_WARNING, "ofUCUtils : Can't set video format %s, with size %dx%d, will use %dx%d",
							format.identifier, w, h,
							format.size.width, format.size.height);
		}else if(format.size_count==0){
			int defaultFormatWidth = format.size.width;
			int defaultFormatHeight = format.size.height;

			format.size.width  = w;
			format.size.height = h;

			ofLog(OF_WARNING, "ofUCUtils : Can't recognize supported video sizes for %s, trying with requested size: %i,%i",
										format.identifier, format.size.width, format.size.height);

			if ( !SUCCESS ( unicap_set_format (handle, &format) ) ) {
				format.size.width  = defaultFormatWidth;
				format.size.height = defaultFormatHeight;

				ofLog(OF_WARNING, "ofUCUtils : Can't set requested size, trying with format defaults: %i,%i",
						defaultFormatWidth, defaultFormatHeight);
			}
			ofLog(OF_WARNING, "ofUCUtils : If this doesn't work try using the reported default size in initGrabber:",
					defaultFormatWidth, defaultFormatHeight);
		}
		if ( !SUCCESS ( unicap_set_format (handle, &format) ) ) {
			ofLog(OF_ERROR, "ofUCUtils : Failed to set alternative video format!");
			return;
		}
		ofLog(OF_NOTICE,"ofUCUtils : Selected format: %s, with size %dx%d\n", format.identifier,
				format.size.width, format.size.height);

		src_pix_fmt=fourcc_to_pix_fmt(format.fourcc);
		if( src_pix_fmt==-1){
			ofLog(OF_ERROR,"ofUCUtils : Format not suported\n");
			return;
		}

		if(src_pix_fmt!=PIX_FMT_RGB24 || !exactMatch){
			src=new AVPicture;
			avpicture_alloc(src,src_pix_fmt,format.size.width,format.size.height);
			dst=new AVPicture;
			avpicture_alloc(dst,PIX_FMT_RGB24,d_width,d_height);

			toRGB_convert_ctx = sws_getContext(
							format.size.width, format.size.height, src_pix_fmt,
							d_width, d_height, PIX_FMT_RGB24,
							VIDEOGRABBER_RESIZE_FLAGS, NULL, NULL, NULL);


			ofLog(OF_NOTICE,"ofUCUtils: Converting to RGB24 (%i,%i)\n",w,h);

			pixels=new unsigned char[d_width*d_height*3];
		}

	   if( !SUCCESS( unicap_get_format( handle, &format ) ) )
	   {
		   ofLog(OF_ERROR, "can't get format" );
		   return;
	   }

		format.buffer_type = UNICAP_BUFFER_TYPE_SYSTEM;

	   if( !SUCCESS( unicap_set_format( handle, &format ) ) )
	   {
		   ofLog(OF_WARNING, "ofUCUtils: Failed to activate SYSTEM_BUFFERS" );
	   }
	}
}
Example #5
bool FFmpegVideoDecoder::nextFrame( CBaseTexture * texture )
{
  // Just in case
  if ( !m_pCodecCtx )
	return false;

  // If we did not preallocate the picture or the texture size changed, (re)allocate it
  if ( !m_pFrameRGB || texture->GetWidth() != m_frameRGBwidth || texture->GetHeight() != m_frameRGBheight )
  {
    if ( m_pFrameRGB )
    {
      avpicture_free( m_pFrameRGB );
      av_free( m_pFrameRGB );
    }

    m_frameRGBwidth = texture->GetWidth();
    m_frameRGBheight = texture->GetHeight();

    // Allocate the conversion frame and relevant picture
    m_pFrameRGB = (AVPicture*)av_mallocz(sizeof(AVPicture));

    if ( !m_pFrameRGB )
      return false;

    // Due to a bug in swscale we need to allocate one extra line of data
    if ( avpicture_alloc( m_pFrameRGB, PIX_FMT_RGB32, m_frameRGBwidth, m_frameRGBheight + 1 ) < 0 )
      return false;
  }

  AVPacket packet;
  int frameFinished;

  while ( true )
  {
    // Read a frame
    if ( av_read_frame( m_pFormatCtx, &packet ) < 0 )
      return false;  // Frame read failed (e.g. end of stream)

    if ( packet.stream_index == m_videoStream )
    {
      // Is this a packet from the video stream -> decode video frame
      avcodec_decode_video2( m_pCodecCtx, m_pFrame, &frameFinished, &packet );

      // Did we get a video frame?
      if ( frameFinished )
      {
        if ( packet.dts != (int64_t)AV_NOPTS_VALUE )
          m_lastFrameTime = packet.dts * av_q2d( m_pFormatCtx->streams[ m_videoStream ]->time_base );
        else
          m_lastFrameTime = 0.0;

        break;
      }
    }

    av_free_packet( &packet );
  }

  // We got the video frame, render it into the picture buffer
  struct SwsContext * context = sws_getContext( m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
                           m_frameRGBwidth, m_frameRGBheight, PIX_FMT_RGB32, SWS_FAST_BILINEAR, NULL, NULL, NULL );

  sws_scale( context, m_pFrame->data, m_pFrame->linesize, 0, m_pCodecCtx->height,
                                                                     m_pFrameRGB->data, m_pFrameRGB->linesize );
  sws_freeContext( context );
  av_free_packet( &packet );

  // And into the texture
  texture->Update( m_frameRGBwidth, m_frameRGBheight, m_frameRGBwidth * 4, XB_FMT_A8R8G8B8, m_pFrameRGB->data[0], false );

  return true;
}
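
Creating and tearing down the SwsContext on every frame is relatively expensive; a minimal sketch of reusing it via the cached variant (assuming a persistent member, hypothetically named m_swsContext and initialized to NULL):

  m_swsContext = sws_getCachedContext( m_swsContext, m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
                                       m_frameRGBwidth, m_frameRGBheight, PIX_FMT_RGB32, SWS_FAST_BILINEAR, NULL, NULL, NULL );
  // ... sws_scale() as above; call sws_freeContext( m_swsContext ) once, in the destructor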
Example #6
int FFMPEG_Wrapper::init(int input_width,
                          int input_height,
                          const ServerConfiguration& config)
{
  boost::mutex::scoped_lock lock(frame_mutex_);

  time_started_ = boost::posix_time::microsec_clock::local_time();

  config_  = config;

  input_width_ = input_width;
  input_height_ = input_height;

  output_width_ = config.frame_width_;
  output_height_ = config.frame_height_;

  if (output_width_<0)
    output_width_ = input_width_;

  if (output_height_<0)
    output_height_ = input_height_;

  av_lockmgr_register(&ff_lockmgr);

  /* register all the codecs */
  avcodec_register_all();
  av_register_all();

  // lookup webm codec
  avformat_alloc_output_context2(&ffmpeg_format_context_, NULL, config_.codec_.c_str(), NULL);
  if (!ffmpeg_format_context_) {
    return -1;
  }

  ffmpeg_output_format_ = ffmpeg_format_context_->oformat;

  /* Add the audio and video streams using the default format codecs
   * and initialize the codecs. */
  ffmpeg_video_st_ = NULL;
  if (ffmpeg_output_format_->video_codec != AV_CODEC_ID_NONE)
  {

    /* find the video encoder */
    ffmpeg_codec_ = avcodec_find_encoder(ffmpeg_output_format_->video_codec);
    if (!(ffmpeg_codec_))
    {
      fprintf(stderr, "Codec not found (%s)\n",config_.codec_.c_str());
      return -1;
    }

    ffmpeg_video_st_ = avformat_new_stream(ffmpeg_format_context_, ffmpeg_codec_);
    if (!ffmpeg_video_st_)
    {
      fprintf(stderr, "Could not alloc stream\n");
      return -1;
    }

    ffmpeg_codec_context_ = ffmpeg_video_st_->codec;



    avcodec_get_context_defaults3(ffmpeg_codec_context_, ffmpeg_codec_);

    //////////////////////////////////////////////
    // ffmpeg codec configuration
    //////////////////////////////////////////////

    ffmpeg_codec_context_->codec_id = ffmpeg_output_format_->video_codec;
    ffmpeg_codec_context_->bit_rate = config_.bitrate_;

    ffmpeg_codec_context_->width = output_width_;
    ffmpeg_codec_context_->height = output_height_;
    ffmpeg_codec_context_->delay = 0;

    ffmpeg_codec_context_->time_base.den = config_.framerate_+3; // increased framerate to compensate for playback delay
    ffmpeg_codec_context_->time_base.num = 1;
    ffmpeg_codec_context_->gop_size = config_.gop_; /* emit one intra ffmpeg_frame_ every twelve frames at most */
    ffmpeg_codec_context_->pix_fmt = PIX_FMT_YUV420P;
    ffmpeg_codec_context_->max_b_frames = 0;

    av_opt_set(ffmpeg_codec_context_->priv_data, "quality", config_.profile_.c_str(), 0);

    av_opt_set(ffmpeg_codec_context_->priv_data, "deadline", "1", 0);
    av_opt_set(ffmpeg_codec_context_->priv_data, "auto-alt-ref", "0", 0);

    // lag in frames
    av_opt_set(ffmpeg_codec_context_->priv_data, "lag-in-frames", "1", 0);
    av_opt_set(ffmpeg_codec_context_->priv_data, "rc_lookahead", "1", 0);

    av_opt_set(ffmpeg_codec_context_->priv_data, "drop_frame", "1", 0);

    // enable error-resilient coding
    av_opt_set(ffmpeg_codec_context_->priv_data, "error-resilient", "1", 0);

    // buffer size of the rate controller (buffer length in ms = rc_buffer_size / bitrate * 1000)
    int bufsize = 10;//ffmpeg_codec_context_->bit_rate/10;
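    // e.g. at 1 Mbit/s, rc_buffer_size = 10 gives 10 / 1000000 * 1000 = 0.01 ms
    // of buffering - effectively none, which keeps end-to-end latency minimal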
    ffmpeg_codec_context_->rc_buffer_size = bufsize;
    // prebuffering at decoder
    ffmpeg_codec_context_->rc_initial_buffer_occupancy = bufsize; // bitrate/3

    av_opt_set_int(ffmpeg_codec_context_->priv_data, "bufsize", bufsize, 0);
    av_opt_set_int(ffmpeg_codec_context_->priv_data, "buf-initial", bufsize, 0);
    av_opt_set_int(ffmpeg_codec_context_->priv_data, "buf-optimal", bufsize, 0);

    // buffer aggressivity
    ffmpeg_codec_context_->rc_buffer_aggressivity = 0.5;

    // Quality settings
    //ffmpeg_codec_context_->qmin = 50;
    //ffmpeg_codec_context_->qmax = 62;
    if (config_.quality_>0)
      ffmpeg_codec_context_->qmin = config_.quality_;

     //ffmpeg_codec_context_->frame_skip_threshold = 100;

    /* Some formats want stream headers to be separate. */
    if (ffmpeg_format_context_->oformat->flags & AVFMT_GLOBALHEADER)
      ffmpeg_codec_context_->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }

  if (ffmpeg_video_st_)
  {
    int ret;

    /* open the codec */
      {
         boost::mutex::scoped_lock lock(codec_mutex_);
         if (avcodec_open2(ffmpeg_codec_context_, ffmpeg_codec_, NULL) < 0) {
             fprintf(stderr, "Could not open video codec\n");
             return -1;
         }
      }  

      /* allocate and init a re-usable ffmpeg_frame_ */
      ffmpeg_frame_ = avcodec_alloc_frame();
      if (!ffmpeg_frame_) {
          fprintf(stderr, "Could not allocate video ffmpeg_frame_\n");
          return -1;
      }

      /* Allocate the encoded raw picture. */
      ret = avpicture_alloc(ffmpeg_dst_picture_, ffmpeg_codec_context_->pix_fmt, output_width_, output_height_);
      if (ret < 0) {
          fprintf(stderr, "Could not allocate picture\n");
          return -1;
      }

      /* The encoder consumes frames in its own pixel format, so a temporary
       * RGB24 picture is allocated for the incoming frames; it is converted
       * to the required output format before encoding. */
      ret = avpicture_alloc(ffmpeg_src_picture_, AV_PIX_FMT_RGB24, input_width_, input_height_);
      if (ret < 0) {
          fprintf(stderr, "Could not allocate temporary picture\n");
          return -1;
      }

      /* copy data and linesize picture pointers to ffmpeg_frame_ */
      *((AVPicture *)ffmpeg_frame_) = *ffmpeg_dst_picture_;

      av_dump_format(ffmpeg_format_context_, 0, "", 1);

      ffmpeg_output_format_->flags |= AVFMT_NOFILE;

      if (ffmpeg_frame_)
          ffmpeg_frame_->pts = 0;
  }

  init_ = true;

  return 0;
}
Example #7
	// Get packets (ordered by dts)
	//----------------------------------------------------------------------------------------------------
	bool EEVideo::LoadVideo(const char* _fileName)
	{
		AVCodec *codec = NULL;

		// open file
		if (avformat_open_input(&m_formatContext, _fileName, NULL, NULL) < 0)
		{
			return false;
		}

		// find stream info
		if (avformat_find_stream_info(m_formatContext, NULL) < 0)
		{
			//unable to find stream info
			avformat_close_input(&m_formatContext);
			return false;
		}
		// find the stream
		if ((m_streamIndex = av_find_best_stream(m_formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0)) < 0)
		{
			avformat_close_input(&m_formatContext);
			return false;
		}

		// find decoder
		m_codecContext = m_formatContext->streams[m_streamIndex]->codec;
		codec = avcodec_find_decoder(m_codecContext->codec_id);
		if (!codec)
		{
			avformat_close_input(&m_formatContext);
			return false;
		}
		// open codec
		if (avcodec_open2(m_codecContext, codec, NULL) < 0)
		{
			avformat_close_input(&m_formatContext);
			return false;
		}

		if (m_width == -1)
			m_width = m_codecContext->width;
		if (m_height == -1)
			m_height = m_codecContext->height;
		m_swsContext = sws_getContext(m_codecContext->width, m_codecContext->height, m_codecContext->pix_fmt, m_width, m_height, AV_PIX_FMT_RGBA, SWS_BICUBIC, nullptr, nullptr, nullptr);
		if (!m_swsContext)
		{
			avformat_close_input(&m_formatContext);
			return false;
		}
		m_frameRGBA = av_frame_alloc();
		avpicture_alloc((AVPicture*)m_frameRGBA, AV_PIX_FMT_RGBA, m_width, m_height);
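		// note: integer division truncates non-integral rates such as 29.97 fps,
		// and a stream that does not declare a framerate (0/0) would divide by zero here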
		m_frameRate = m_codecContext->framerate.num / m_codecContext->framerate.den;
		m_totalTime = m_formatContext->duration / (double)AV_TIME_BASE;
		m_timeBase = av_q2d(m_formatContext->streams[m_streamIndex]->time_base);
		int count = (int)(m_frameRate * m_totalTime * 1.1);	
		m_packets.reserve(count);

		int flag = 0;
		while (flag >= 0)
		{
			AVPacket *packet = new AVPacket;
			av_init_packet(packet);

			flag = av_read_frame(m_formatContext, packet);
			if (flag >= 0)
			{
				if (packet->stream_index == m_streamIndex)
				{
					m_packets.push_back(packet);
				}
				else
				{
					av_free_packet(packet);
					delete packet;
					packet = nullptr;
				}
			}
			else
			{
				av_free_packet(packet);
				delete packet;
				packet = nullptr;
			}
		}

		return true;
	}
Example #8
bool FFMpegManager::writeVideoFrame(const QString &imagePath, AVFormatContext *oc, AVStream *st, int fps)
{
#if LIBAVCODEC_BUILD <= 4743
	AVCodecContext *c = &st->codec;
#else
	AVCodecContext *c = st->codec;
#endif

	AVFrame *picturePtr;
	
	double nbFrames = ((int)(m_streamDuration * fps));
	if (m_frameCount >= nbFrames)
	{
		/* no more frames to compress. The codec has a latency of a few
		frames when using B frames, so we flush the last frames by
		passing a NULL picture */
		picturePtr = 0;
	}
	else
	{
		QImage image(imagePath);
		
		AVPicture pictTmp;
		
		avpicture_alloc(&pictTmp, PIX_FMT_RGBA32,c->width, c->height);

		memcpy(pictTmp.data[0],image.bits(),c->width*c->height*4);

		img_convert((AVPicture *)m_picture,c->pix_fmt,&pictTmp,PIX_FMT_RGBA32,c->width,c->height);
		avpicture_free(&pictTmp);

		picturePtr = m_picture;
	}

	int out_size = -1, ret = -1;
	if (oc->oformat->flags & AVFMT_RAWPICTURE)
	{
		/* raw video case. The API will change slightly in the near
		future for that */
		AVPacket pkt;
		av_init_packet(&pkt);
        
		pkt.flags |= PKT_FLAG_KEY;
		pkt.stream_index= st->index;
		pkt.data= (uint8_t *)picturePtr;
		pkt.size= sizeof(AVPicture);
        
		ret = av_write_frame(oc, &pkt);
	} 
	else 
	{
		/* encode the image */
		out_size = avcodec_encode_video(c, videOutbuf, videOutbufSize, picturePtr);
// 		out_size = av_write_trailer(oc);
		/* if zero size, it means the image was buffered */

		if (out_size > 0)
		{
			AVPacket pkt;
			av_init_packet(&pkt);
            
// 			pkt.pts= c->coded_frame->pts;
			pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
			
			if(c->coded_frame->key_frame)
				pkt.flags |= PKT_FLAG_KEY;
			pkt.stream_index= st->index;
			pkt.data= videOutbuf;
			pkt.size= out_size;
            
			/* write the compressed frame in the media file */
			ret = av_write_frame(oc, &pkt);
		} 
		else 
		{
			ret = 0;
		}
	}
	if (ret != 0)
	{
		dError() << "Error while writing video frame";
		return false;
	}
	m_frameCount++;
	
	return true;
}
Example #9
QbPacket VideoStream::convert(AVFrame *iFrame)
{
    AVPicture *oPicture;
    AVPixelFormat oFormat;
    bool delFrame = false;

    if (outputFormats->contains(AVPixelFormat(iFrame->format))) {
        oPicture = (AVPicture *) iFrame;
        oFormat = AVPixelFormat(iFrame->format);
    }
    else {
        oPicture = new AVPicture;
        oFormat = AV_PIX_FMT_BGRA;

        avpicture_alloc(oPicture,
                        oFormat,
                        iFrame->width,
                        iFrame->height);

        this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                    iFrame->width,
                                                    iFrame->height,
                                                    AVPixelFormat(iFrame->format),
                                                    iFrame->width,
                                                    iFrame->height,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

        sws_scale(this->m_scaleContext,
                  (uint8_t **) iFrame->data,
                  iFrame->linesize,
                  0,
                  iFrame->height,
                  oPicture->data,
                  oPicture->linesize);

        delFrame = true;
    }

    QbVideoPacket packet;
    packet.caps().isValid() = true;
    packet.caps().format() = outputFormats->value(oFormat);
    packet.caps().width() = iFrame->width;
    packet.caps().height() = iFrame->height;
    packet.caps().fps() = this->fps();

    int frameSize = avpicture_get_size(oFormat,
                                       iFrame->width,
                                       iFrame->height);

    QbBufferPtr oBuffer(new char[frameSize]);

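    // avpicture_layout() flattens the (possibly padded) picture planes into
    // one contiguous buffer of exactly frameSize bytes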
    avpicture_layout(oPicture,
                     oFormat,
                     iFrame->width,
                     iFrame->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    packet.buffer() = oBuffer;
    packet.bufferSize() = frameSize;
    packet.pts() = av_frame_get_best_effort_timestamp(iFrame);
    packet.timeBase() = this->timeBase();
    packet.index() = this->index();
    packet.id() = this->id();

    if (delFrame) {
        avpicture_free(oPicture);
        delete oPicture;
    }

    return packet.toPacket();
}
Example #10
void FFMS_VideoSource::ReAdjustOutputFormat() {
	if (SWS) {
		sws_freeContext(SWS);
		SWS = NULL;
	}

	if (InputFormat == PIX_FMT_NONE)
		InputFormat = CodecContext->pix_fmt;

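	// handle_jpeg() maps the legacy full-range "J" pixel formats (e.g.
	// PIX_FMT_YUVJ420P) onto their regular counterparts and reports the
	// color range they imply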
	AVColorRange RangeFromFormat = handle_jpeg(&InputFormat);

	if (InputColorRange == AVCOL_RANGE_UNSPECIFIED)
		InputColorRange = RangeFromFormat;
	if (InputColorRange == AVCOL_RANGE_UNSPECIFIED)
		InputColorRange = CodecContext->color_range;
	if (InputColorRange == AVCOL_RANGE_UNSPECIFIED)
		InputColorRange = AVCOL_RANGE_MPEG;

	if (InputColorSpace == AVCOL_SPC_UNSPECIFIED)
		InputColorSpace = CodecContext->colorspace;
	if (InputColorSpace == AVCOL_SPC_UNSPECIFIED)
		InputColorSpace = GetAssumedColorSpace(CodecContext->width, CodecContext->height);

	OutputFormat = FindBestPixelFormat(TargetPixelFormats, InputFormat);

	if (OutputFormat == PIX_FMT_NONE) {
		ResetOutputFormat();
		throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
			"No suitable output format found");
	}

	OutputColorRange = handle_jpeg(&OutputFormat);
	if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
		OutputColorRange = CodecContext->color_range;
	if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
		OutputColorRange = InputColorRange;

	OutputColorSpace = CodecContext->colorspace;
	if (OutputColorSpace == AVCOL_SPC_UNSPECIFIED)
		OutputColorSpace = InputColorSpace;

	if (InputFormat != OutputFormat ||
		TargetWidth != CodecContext->width ||
		TargetHeight != CodecContext->height ||
		InputColorSpace != OutputColorSpace ||
		InputColorRange != OutputColorRange)
	{
		SWS = GetSwsContext(
			CodecContext->width, CodecContext->height, InputFormat, InputColorSpace, InputColorRange,
			TargetWidth, TargetHeight, OutputFormat, OutputColorSpace, OutputColorRange,
			GetSWSCPUFlags() | TargetResizer);

		if (!SWS) {
			ResetOutputFormat();
			throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
				"Failed to allocate SWScale context");
		}
	}

	avpicture_free(&SWSFrame);
	avpicture_alloc(&SWSFrame, OutputFormat, TargetWidth, TargetHeight);
}
Example #11
//------------------------------------------------------------------------------
void videoDecodingThread(ThreadInfo* p_threadInfo)
{
    // Read ThreadInfo struct, then delete it
    FFmpegVideoPlayer* videoPlayer = p_threadInfo->videoPlayer;
    VideoInfo& videoInfo = videoPlayer->getVideoInfo();
    boost::mutex* playerMutex = p_threadInfo->playerMutex;
    boost::condition_variable* playerCondVar = p_threadInfo->playerCondVar;
    boost::mutex* decodeMutex = p_threadInfo->decodingMutex;
    boost::condition_variable* decodeCondVar = p_threadInfo->decodingCondVar;
    bool isLoop = p_threadInfo->isLoop;
    staticOgreLog = videoPlayer->getLog();
    delete p_threadInfo;
    
    // Initialize FFmpeg  
    av_register_all();
    av_log_set_callback(log_callback);
    av_log_set_level(AV_LOG_WARNING);
    
    // Initialize video decoding, filling the VideoInfo
    // Open the input file
    AVFormatContext* formatContext = NULL;
    const char* name = videoPlayer->getVideoFilename().c_str();
    if (avformat_open_input(&formatContext, name, NULL, NULL) < 0) 
    {
        videoInfo.error = "Could not open input: ";
        videoInfo.error.append(videoPlayer->getVideoFilename());
        playerCondVar->notify_all();
        return;
    }
    
    // Read stream information
    if (avformat_find_stream_info(formatContext, NULL) < 0) 
    {
        videoInfo.error = "Could not find stream information.";
        playerCondVar->notify_all();
        return;
    }
    
    // Get streams
    // Audio stream
    AVStream* audioStream = NULL;
    AVCodecContext* audioCodecContext = NULL;
    int audioStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_AUDIO, videoInfo, audioStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    audioStream = formatContext->streams[audioStreamIndex];
    audioCodecContext = audioStream->codec;
    
    // Video stream
    AVStream* videoStream = NULL;
    AVCodecContext* videoCodecContext = NULL;
    int videoStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_VIDEO, videoInfo, videoStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    videoStream = formatContext->streams[videoStreamIndex];
    videoCodecContext = videoStream->codec;
    
    // Dump information
    av_dump_format(formatContext, 0, videoPlayer->getVideoFilename().c_str(), 0);
    
    // Store useful information in VideoInfo struct
    double timeBase = ((double)audioStream->time_base.num) / (double)audioStream->time_base.den;
    videoInfo.audioDuration = audioStream->duration * timeBase;
    videoInfo.audioSampleRate = audioCodecContext->sample_rate;
    videoInfo.audioBitRate = audioCodecContext->bit_rate;
    videoInfo.audioNumChannels = 
            videoInfo.audioNumChannels > 0 ? videoInfo.audioNumChannels : audioCodecContext->channels;
    
    timeBase = ((double)videoStream->time_base.num) / (double)videoStream->time_base.den;
    videoInfo.videoDuration = videoStream->duration * timeBase;
    videoInfo.videoWidth = videoCodecContext->width;
    videoInfo.videoHeight = videoCodecContext->height;
    
    // If a duration is below 0 seconds, something is very fishy.
    // Use the format duration instead; it's the best guess we have
    if (videoInfo.audioDuration < 0.0)
    {
        videoInfo.audioDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
    if (videoInfo.videoDuration < 0.0)
    {
        videoInfo.videoDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
 
    // Store the longer of both durations. This is what determines when looped videos
    // will begin anew
    videoInfo.longerDuration = videoInfo.videoDuration > videoInfo.audioDuration ? 
                                videoInfo.videoDuration : videoInfo.audioDuration;
            
    // Wake up video player
    videoInfo.infoFilled = true;
    playerCondVar->notify_all();
    
    // Initialize packet, set data to NULL, let the demuxer fill it
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;
    
    // Initialize SWS context
    SwsContext* swsContext = NULL;
    swsContext = sws_getCachedContext(swsContext,
                                videoInfo.videoWidth, videoInfo.videoHeight, videoCodecContext->pix_fmt, 
                                videoInfo.videoWidth, videoInfo.videoHeight, PIX_FMT_RGBA, 
                                SWS_BICUBIC, NULL, NULL, NULL);
    
    // Create destination picture
    AVFrame* destPic = avcodec_alloc_frame();
    avpicture_alloc((AVPicture*)destPic, PIX_FMT_RGBA, videoInfo.videoWidth, videoInfo.videoHeight);
    
    // Get the correct target channel layout
    uint64_t targetChannelLayout;
    // Keep the source layout
    if (audioCodecContext->channels == videoInfo.audioNumChannels)
    {
        targetChannelLayout = audioCodecContext->channel_layout;
    }
    // Or determine a new one
    else
    {
        switch (videoInfo.audioNumChannels)
        {
            case 1:
                targetChannelLayout = AV_CH_LAYOUT_MONO;
                break;
                
            case 2:
                targetChannelLayout = AV_CH_LAYOUT_STEREO;
                break;
                
            default:
                targetChannelLayout = audioCodecContext->channel_layout;
                break;
        }
    }
    
    // Initialize SWR context
    SwrContext* swrContext = swr_alloc_set_opts(NULL, 
                targetChannelLayout, AV_SAMPLE_FMT_FLT, audioCodecContext->sample_rate,
                audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 
                0, NULL);
    int result = swr_init(swrContext);
    if (result != 0) 
    {
        videoInfo.error = "Could not initialize swr context: " + boost::lexical_cast<std::string>(result);
        playerCondVar->notify_all();
        return;
    }
    
    // Create destination sample buffer
    uint8_t** destBuffer = NULL;
    int destBufferLinesize;
    av_samples_alloc_array_and_samples( &destBuffer,
                                        &destBufferLinesize,
                                        videoInfo.audioNumChannels,
                                        2048,
                                        AV_SAMPLE_FMT_FLT,
                                        0);
    
    // Main decoding loop
    // Read the input file frame by frame
    AVFrame* frame = NULL;
    while (av_read_frame(formatContext, &packet) >= 0) 
    {
        // Only start decoding when at least one of the buffers is not full
        while (videoPlayer->getVideoBufferIsFull() && videoPlayer->getAudioBufferIsFull())
        {
            boost::unique_lock<boost::mutex> lock(*decodeMutex);
            boost::chrono::steady_clock::time_point const timeOut = 
                boost::chrono::steady_clock::now() + boost::chrono::milliseconds((int)videoPlayer->getBufferTarget() * 1000);
            decodeCondVar->wait_until(lock, timeOut);
            
            if (videoInfo.decodingAborted)
            {
                break;
            }
        }
            
        // Break if the decoding was aborted
        if (videoInfo.decodingAborted)
        {
            break;
        }
        
        // Initialize frame
        if (!frame) 
        {
            if (!(frame = avcodec_alloc_frame())) 
            {
                videoInfo.error = "Out of memory.";
                return;
            }
        } 
        else
        {
            avcodec_get_frame_defaults(frame);
        }
        
        // Decode the packet
        AVPacket orig_pkt = packet;
        do 
        {
            int decoded = 0;
            if (packet.stream_index == audioStreamIndex)
            {
                decoded = decodeAudioPacket(packet, audioCodecContext, audioStream, frame, swrContext,
                                            destBuffer, destBufferLinesize, videoPlayer, videoInfo, isLoop);
            }
            else if (packet.stream_index == videoStreamIndex)
            {
                decoded = decodeVideoPacket(packet, videoCodecContext, videoStream, frame, swsContext, 
                                            (AVPicture*)destPic, videoPlayer, videoInfo, isLoop);
            }
            else
            {
                // This means that we have a stream that is neither our video nor audio stream
                // Just skip the package
                break;
            }
            
            // decoded will be negative on an error
            if (decoded < 0)
            {
                // The error itself is set by the decode functions
                playerCondVar->notify_all();
                return;
            }
            
            // Increment data pointer, subtract from size
            packet.data += decoded;
            packet.size -= decoded;
        } while (packet.size > 0);
        
        av_free_packet(&orig_pkt);
    }
    
    // We're done. Close everything
    avcodec_free_frame(&frame);
    avpicture_free((AVPicture*)destPic);
    avcodec_free_frame(&destPic);
    avcodec_close(videoCodecContext);
    avcodec_close(audioCodecContext);
    sws_freeContext(swsContext);
    av_freep(&destBuffer[0]);
    swr_free(&swrContext);
    avformat_close_input(&formatContext);
    
    videoInfo.audioDuration = videoInfo.audioDecodedDuration;
    videoInfo.decodingDone = videoInfo.decodingAborted ? false : true;
}
Example #12
FFMS_VideoSource::FFMS_VideoSource(const char *SourceFile, FFMS_Index &Index, int Track, int Threads)
: Index(Index)
, CodecContext(NULL)
{
	if (Track < 0 || Track >= static_cast<int>(Index.size()))
		throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
			"Out of bounds track index selected");

	if (Index[Track].TT != FFMS_TYPE_VIDEO)
		throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
			"Not a video track");

	if (Index[Track].size() == 0)
		throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
			"Video track contains no frames");

	if (!Index.CompareFileSignature(SourceFile))
		throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_FILE_MISMATCH,
			"The index does not match the source file");

	Frames = Index[Track];
	VideoTrack = Track;

	memset(&VP, 0, sizeof(VP));
#ifdef FFMS_USE_POSTPROC
	PPContext = NULL;
	PPMode = NULL;
#endif // FFMS_USE_POSTPROC
	SWS = NULL;
	LastFrameNum = 0;
	CurrentFrame = 1;
	DelayCounter = 0;
	InitialDecode = 1;
	CodecContext = NULL;

	LastFrameHeight = -1;
	LastFrameWidth = -1;
	LastFramePixelFormat = PIX_FMT_NONE;

	TargetHeight = -1;
	TargetWidth = -1;
	TargetResizer = 0;

	OutputFormat = PIX_FMT_NONE;
	OutputColorSpace = AVCOL_SPC_UNSPECIFIED;
	OutputColorRange = AVCOL_RANGE_UNSPECIFIED;

	InputFormatOverridden = false;
	InputFormat = PIX_FMT_NONE;
	InputColorSpace = AVCOL_SPC_UNSPECIFIED;
	InputColorRange = AVCOL_RANGE_UNSPECIFIED;

	if (Threads < 1)
		DecodingThreads = GetNumberOfLogicalCPUs();
	else
		DecodingThreads = Threads;
	DecodeFrame = avcodec_alloc_frame();

	// Dummy allocations so the unallocated case doesn't have to be handled later
#ifdef FFMS_USE_POSTPROC
	avpicture_alloc(&PPFrame, PIX_FMT_GRAY8, 16, 16);
#endif // FFMS_USE_POSTPROC
	avpicture_alloc(&SWSFrame, PIX_FMT_GRAY8, 16, 16);

	Index.AddRef();
}
Example #13
void FeVideoImp::video_thread()
{
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
	const unsigned int MAX_QUEUE( 4 ), MIN_QUEUE( 0 );
#else
	const unsigned int MAX_QUEUE( 1 ), MIN_QUEUE( 0 );
#endif

	bool exhaust_queue( false );
	sf::Time max_sleep = time_base / (sf::Int64)2;

	int qscore( 100 ), qadjust( 10 ); // quality scoring
	int displayed( 0 ), discarded( 0 ), qscore_accum( 0 );

	std::queue<AVFrame *> frame_queue;

	AVPicture *my_pict = (AVPicture *)av_malloc( sizeof( AVPicture ) );
	avpicture_alloc( my_pict, PIX_FMT_RGBA,
							codec_ctx->width,
							codec_ctx->height );

	if ((!sws_ctx) || (!my_pict) )
	{
		std::cerr << "Error initializing video thread" << std::endl;
		goto the_end;
	}

	while ( run_video_thread )
	{
		bool do_process = true;
		bool discard_frames = false;

		//
		// First, display queued frames if they are coming up
		//
		if (( frame_queue.size() > MIN_QUEUE )
			|| ( exhaust_queue && !frame_queue.empty() ))
		{
			sf::Time wait_time = (sf::Int64)frame_queue.front()->pts * time_base
										- m_parent->get_video_time();

			if ( wait_time < max_sleep )
			{
				if ( wait_time < -time_base )
				{
					// If we are falling behind, we may need to start discarding
					// frames to catch up
					//
					qscore -= qadjust;
					set_avdiscard_from_qscore( codec_ctx, qscore );
					discard_frames = ( codec_ctx->skip_frame == AVDISCARD_ALL );
				}
				else if ( wait_time >= sf::Time::Zero )
				{
					if ( discard_frames )
					{
						//
						// Only stop discarding frames once we have caught up and are
						// time_base ahead of the desired presentation time
						//
						if ( wait_time >= time_base )
							discard_frames = false;
					}
					else
					{
						//
						// Otherwise, we are ahead and can sleep until presentation time
						//
						sf::sleep( wait_time );
					}
				}

				AVFrame *detached_frame = frame_queue.front();
				frame_queue.pop();

				qscore_accum += qscore;
				if ( discard_frames )
				{
					discarded++;
					continue;
				}

				sf::Lock l( image_swap_mutex );
				displayed++;

				sws_scale( sws_ctx, detached_frame->data, detached_frame->linesize,
							0, codec_ctx->height, my_pict->data,
							my_pict->linesize );

				display_frame = my_pict->data[0];
				free_frame( detached_frame );

				do_process = false;
			}
			//
			// if we didn't do anything above, then we go into the queue
			// management process below
			//
		}

		if ( do_process )
		{
			if ( frame_queue.size() < MAX_QUEUE )
			{
				//
				// get next packet
				//
				AVPacket *packet = pop_packet();
				if ( packet == NULL )
				{
					if ( !m_parent->end_of_file() )
						m_parent->read_packet();
					else if ( frame_queue.empty() )
						goto the_end;
					else
						exhaust_queue=true;
				}
				else
				{
					//
					// decompress packet and put it in our frame queue
					//
					int got_frame = 0;
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
					AVFrame *raw_frame = av_frame_alloc();
					codec_ctx->refcounted_frames = 1;
#else
					AVFrame *raw_frame = avcodec_alloc_frame();
#endif

					int len = avcodec_decode_video2( codec_ctx, raw_frame,
											&got_frame, packet );
					if ( len < 0 )
						std::cerr << "Error decoding video" << std::endl;

					if ( got_frame )
					{
						raw_frame->pts = raw_frame->pkt_pts;

						if ( raw_frame->pts == AV_NOPTS_VALUE )
							raw_frame->pts = packet->dts;

						frame_queue.push( raw_frame );
					}
					else
						free_frame( raw_frame );

					free_packet( packet );
				}
			}
			else
			{
				// Adjust our quality scoring, increasing it if it is down
				//
				if (( codec_ctx->skip_frame != AVDISCARD_DEFAULT )
						&& ( qadjust > 1 ))
					qadjust--;

				if ( qscore <= -100 ) // we stick at the lowest rate if we are actually discarding frames
					qscore = -100;
				else if ( qscore < 100 )
					qscore += qadjust;

				set_avdiscard_from_qscore( codec_ctx, qscore );

				//
				// full frame queue and nothing to display yet, so sleep
				//
				sf::sleep( max_sleep );
			}
		}
	}

the_end:
	//
	// shutdown the thread
	//
	at_end=true;

	if ( my_pict )
	{
		sf::Lock l( image_swap_mutex );

		avpicture_free( my_pict );
		av_free( my_pict );
		display_frame=NULL;
	}

	while ( !frame_queue.empty() )
	{
		AVFrame *f=frame_queue.front();
		frame_queue.pop();

		if (f)
			free_frame( f );
	}

#ifdef FE_DEBUG

	int total_shown = displayed + discarded;
	int average = ( total_shown == 0 ) ? qscore_accum : ( qscore_accum / total_shown );

	std::cout << "End Video Thread - " << m_parent->m_format_ctx->filename << std::endl
				<< " - bit_rate=" << codec_ctx->bit_rate
				<< ", width=" << codec_ctx->width << ", height=" << codec_ctx->height << std::endl
				<< " - displayed=" << displayed << ", discarded=" << discarded << std::endl
				<< " - average qscore=" << average
				<< std::endl;
#endif
}
Example #14
void FeVideoImp::preload()
{
	bool keep_going = true;
	while ( keep_going )
	{
		AVPacket *packet = pop_packet();
		if ( packet == NULL )
		{
			if ( !m_parent->end_of_file() )
				m_parent->read_packet();
			else
				keep_going = false;
		}
		else
		{
			//
			// decompress packet and put it in our frame queue
			//
			int got_frame = 0;
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
			AVFrame *raw_frame = av_frame_alloc();
			codec_ctx->refcounted_frames = 1;
#else
			AVFrame *raw_frame = avcodec_alloc_frame();
#endif

			int len = avcodec_decode_video2( codec_ctx, raw_frame,
									&got_frame, packet );
			if ( len < 0 )
			{
				std::cerr << "Error decoding video" << std::endl;
				keep_going=false;
			}

			if ( got_frame )
			{
				AVPicture *my_pict = (AVPicture *)av_malloc( sizeof( AVPicture ) );
				avpicture_alloc( my_pict, PIX_FMT_RGBA,
										codec_ctx->width,
										codec_ctx->height );

				if ( !my_pict )
				{
					std::cerr << "Error allocating AVPicture during preload" << std::endl;
					free_frame( raw_frame );
					free_packet( packet );
					return;
				}

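				// dimensions that are not a multiple of 8 can show scaling
				// artifacts in swscale unless accurate rounding is enabled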
				if ( (codec_ctx->width & 0x7) || (codec_ctx->height & 0x7) )
					sws_flags |= SWS_ACCURATE_RND;

				sws_ctx = sws_getCachedContext( NULL,
								codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
								codec_ctx->width, codec_ctx->height, PIX_FMT_RGBA,
								sws_flags, NULL, NULL, NULL );

				if ( !sws_ctx )
				{
					std::cerr << "Error allocating SwsContext during preload" << std::endl;
					avpicture_free( my_pict );
					av_free( my_pict );
					free_frame( raw_frame );
					free_packet( packet );
					return;
				}

				sws_scale( sws_ctx, raw_frame->data, raw_frame->linesize,
							0, codec_ctx->height, my_pict->data,
							my_pict->linesize );

				sf::Lock l( image_swap_mutex );
				display_texture->update( my_pict->data[0] );

				keep_going = false;

				avpicture_free( my_pict );
				av_free( my_pict );
			}

			free_frame( raw_frame );
			free_packet( packet );
		}
	}
}
Example #15
status_t
AVCodecEncoder::_Setup()
{
	TRACE("AVCodecEncoder::_Setup\n");

	int rawBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
		TRACE("  B_MEDIA_RAW_VIDEO\n");
		// frame rate
		fContext->time_base.den = (int)fInputFormat.u.raw_video.field_rate;
		fContext->time_base.num = 1;
		// video size
		fContext->width = fInputFormat.u.raw_video.display.line_width;
		fContext->height = fInputFormat.u.raw_video.display.line_count;
		fContext->gop_size = 12;
		// TODO: Fix pixel format or setup conversion method...
		for (int i = 0; fCodec->pix_fmts[i] != PIX_FMT_NONE; i++) {
			// Use the last supported pixel format, which we hope is the
			// one with the best quality.
			fContext->pix_fmt = fCodec->pix_fmts[i];
		}

		// TODO: Setup rate control:
//		fContext->rate_emu = 0;
//		fContext->rc_eq = NULL;
//		fContext->rc_max_rate = 0;
//		fContext->rc_min_rate = 0;
		// TODO: Try to calculate a good bit rate...
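		// assumes roughly 2 bytes per pixel of raw video, times 8 for bits/s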
		rawBitRate = (int)(fContext->width * fContext->height * 2
			* fInputFormat.u.raw_video.field_rate) * 8;

		// Pixel aspect ratio
		fContext->sample_aspect_ratio.num
			= fInputFormat.u.raw_video.pixel_width_aspect;
		fContext->sample_aspect_ratio.den
			= fInputFormat.u.raw_video.pixel_height_aspect;
		if (fContext->sample_aspect_ratio.num == 0
			|| fContext->sample_aspect_ratio.den == 0) {
			av_reduce(&fContext->sample_aspect_ratio.num,
				&fContext->sample_aspect_ratio.den, fContext->width,
				fContext->height, 255);
		}

		// TODO: This should already happen in AcceptFormat()
		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
			fInputFormat.u.raw_video.display.bytes_per_row
				= fContext->width * 4;
		}

		fFrame->pts = 0;

		// Allocate space for colorspace converted AVPicture
		// TODO: Check allocations...
		avpicture_alloc(&fDstFrame, fContext->pix_fmt, fContext->width,
			fContext->height);

		// Make the frame point to the data in the converted AVPicture
		fFrame->data[0] = fDstFrame.data[0];
		fFrame->data[1] = fDstFrame.data[1];
		fFrame->data[2] = fDstFrame.data[2];
		fFrame->data[3] = fDstFrame.data[3];

		fFrame->linesize[0] = fDstFrame.linesize[0];
		fFrame->linesize[1] = fDstFrame.linesize[1];
		fFrame->linesize[2] = fDstFrame.linesize[2];
		fFrame->linesize[3] = fDstFrame.linesize[3];
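		// fFrame now aliases the buffers owned by fDstFrame; only fDstFrame
		// needs to be freed later (with avpicture_free())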

		fSwsContext = sws_getContext(fContext->width, fContext->height,
			colorspace_to_pixfmt(fInputFormat.u.raw_video.display.format),
			fContext->width, fContext->height,
			fContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		TRACE("  B_MEDIA_RAW_AUDIO\n");
		// frame rate
		fContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
		// channels
		fContext->channels = fInputFormat.u.raw_audio.channel_count;
		// raw bitrate
		rawBitRate = fContext->sample_rate * fContext->channels
			* (fInputFormat.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
		// sample format
		switch (fInputFormat.u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fContext->sample_fmt = SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fContext->sample_fmt = SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fContext->sample_fmt = SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fContext->sample_fmt = SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fContext->sample_fmt = SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (fInputFormat.u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
			switch (fInputFormat.u.raw_audio.channel_count) {
				default:
				case 2:
					fContext->channel_layout = CH_LAYOUT_STEREO;
					break;
				case 1:
					fContext->channel_layout = CH_LAYOUT_MONO;
					break;
				case 3:
					fContext->channel_layout = CH_LAYOUT_SURROUND;
					break;
				case 4:
					fContext->channel_layout = CH_LAYOUT_QUAD;
					break;
				case 5:
					fContext->channel_layout = CH_LAYOUT_5POINT0;
					break;
				case 6:
					fContext->channel_layout = CH_LAYOUT_5POINT1;
					break;
				case 8:
					fContext->channel_layout = CH_LAYOUT_7POINT1;
					break;
				case 10:
					fContext->channel_layout = CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
		}
	} else {
		TRACE("  UNSUPPORTED MEDIA TYPE!\n");
		return B_NOT_SUPPORTED;
	}

	// TODO: Support letting the user overwrite this via
	// SetEncodeParameters(). See comments there...
	int wantedBitRate = (int)(rawBitRate / fBitRateScale
		* fEncodeParameters.quality);
	if (wantedBitRate == 0)
		wantedBitRate = (int)(rawBitRate / fBitRateScale);

	fContext->bit_rate = wantedBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		// Some audio encoders support certain bitrates only. Use the
		// closest match to the wantedBitRate.
		const int kBitRates[] = {
			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
			576000, 640000
		};
		int diff = wantedBitRate;
		for (int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
			int currentDiff = abs(wantedBitRate - kBitRates[i]);
			if (currentDiff < diff) {
				fContext->bit_rate = kBitRates[i];
				diff = currentDiff;
			} else
				break;
		}
	}

	TRACE("  rawBitRate: %d, wantedBitRate: %d (%.1f), "
		"context bitrate: %d\n", rawBitRate, wantedBitRate,
		fEncodeParameters.quality, fContext->bit_rate);

	// Add some known fixes from the FFmpeg API example:
	if (fContext->codec_id == CODEC_ID_MPEG2VIDEO) {
		// Just for testing, we also add B frames
		fContext->max_b_frames = 2;
	} else if (fContext->codec_id == CODEC_ID_MPEG1VIDEO) {
		// Needed to avoid using macroblocks in which some coeffs overflow.
		// This does not happen with normal video, it just happens here as
		// the motion of the chroma plane does not match the luma plane.
		fContext->mb_decision = 2;
	}

	// Unfortunately, we may fail later, when we try to open the codec
	// for real... but we need to delay this because we still allow
	// parameter/quality changes.
	return B_OK;
}
Example #16
MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
    vid_st = av_new_stream(oc, 0);
    if(!vid_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
	// FIXME: test & make endian-neutral
	pixfmt = PIX_FMT_RGB565LE;
	break;
    case 24:
	pixfmt = PIX_FMT_RGB24;
	break;
    case 32:
    default: // should never be anything else
	pixfmt = PIX_FMT_RGBA;
	break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    if(!codec) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }
    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
	const enum PixelFormat *p;
	int64_t mask = 0;
	for(p = codec->pix_fmts; *p != -1; p++) {
	    // may get complaints about 1LL; thus the cast
	    mask |= ((int64_t)1) << *p;
	    if(*p == pixfmt)
		break;
	}
	if(*p == -1) {
	    // if not supported, use a converter to the next best format
	    // this is swscale, the converter used by the output demo
	    enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
	    if(dp == -1)
		dp = codec->pix_fmts[0];
	    if(!(convpic = avcodec_alloc_frame()) ||
	       avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
	    converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
				       NULL, NULL, NULL);
#else
	    converter = sws_alloc_context();
	    // what a convoluted, inefficient way to set options
	    av_set_int(converter, "sws_flags", SWS_BICUBIC);
	    av_set_int(converter, "srcw", w);
	    av_set_int(converter, "srch", h);
	    av_set_int(converter, "dstw", w);
	    av_set_int(converter, "dsth", h);
	    av_set_int(converter, "src_format", pixfmt);
	    av_set_int(converter, "dst_format", dp);
	    sws_init_context(converter, NULL, NULL);
#endif
	    ctx->pix_fmt = dp;
	}
    }
    if(avcodec_open(ctx, codec)) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}
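
A note on the LIBSWSCALE >= 0.12 branch above: av_set_int() was later removed
from libavutil. A minimal sketch of the same setup against newer releases,
assuming av_opt_set_int() from libavutil/opt.h and the AVOption names defined
by libswscale:

    converter = sws_alloc_context();
    av_opt_set_int(converter, "sws_flags",  SWS_BICUBIC, 0);
    av_opt_set_int(converter, "srcw",       w,           0);
    av_opt_set_int(converter, "srch",       h,           0);
    av_opt_set_int(converter, "dstw",       w,           0);
    av_opt_set_int(converter, "dsth",       h,           0);
    av_opt_set_int(converter, "src_format", pixfmt,      0);
    av_opt_set_int(converter, "dst_format", dp,          0);
    if (sws_init_context(converter, NULL, NULL) < 0)
        return MRET_ERR_NOCODEC; // hypothetical error path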
Ejemplo n.º 17
void LibavStreamer::initialize(const cv::Mat &img)
{
  // Load format
  format_context_ = avformat_alloc_context();
  if (!format_context_)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Error allocating ffmpeg format context");
  }
  output_format_ = av_guess_format(format_name_.c_str(), NULL, NULL);
  if (!output_format_)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Error looking up output format");
  }
  format_context_->oformat = output_format_;

  // Load codec
  if (codec_name_.empty()) // use default codec if none specified
    codec_ = avcodec_find_encoder(output_format_->video_codec);
  else
    codec_ = avcodec_find_encoder_by_name(codec_name_.c_str());
  if (!codec_)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Error looking up codec");
  }
  video_stream_ = avformat_new_stream(format_context_, codec_);
  if (!video_stream_)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Error creating video stream");
  }
  codec_context_ = video_stream_->codec;

  // Set options
  avcodec_get_context_defaults3(codec_context_, codec_);

  codec_context_->codec_id = output_format_->video_codec;
  codec_context_->bit_rate = bitrate_;

  codec_context_->width = output_width_;
  codec_context_->height = output_height_;
  codec_context_->delay = 0;

  video_stream_->time_base.num = 1;
  video_stream_->time_base.den = 1000;

  codec_context_->time_base.num = 1;
  codec_context_->time_base.den = 1;
  codec_context_->gop_size = gop_;
  codec_context_->pix_fmt = PIX_FMT_YUV420P;
  codec_context_->max_b_frames = 0;

  // Quality settings
  codec_context_->qmin = qmin_;
  codec_context_->qmax = qmax_;

  initializeEncoder();

  // Some formats want stream headers to be separate
  if (format_context_->oformat->flags & AVFMT_GLOBALHEADER)
    codec_context_->flags |= CODEC_FLAG_GLOBAL_HEADER;

  // Open Codec
  if (avcodec_open2(codec_context_, codec_, NULL) < 0)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Could not open video codec");
  }

  // Allocate frame buffers
  frame_ = avcodec_alloc_frame();
  tmp_picture_ = new AVPicture;
  picture_ = new AVPicture;
  int ret = avpicture_alloc(picture_, codec_context_->pix_fmt, output_width_, output_height_);
  if (ret < 0)
  {
    async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                         connection_,
                                                                                                         NULL, NULL);
    throw std::runtime_error("Could not allocate picture frame");
  }
  *((AVPicture *)frame_) = *picture_;
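  // Note: the struct copy above makes frame_ alias picture_'s buffers; it
  // duplicates the data/linesize pointers, not the pixel data itself.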

  output_format_->flags |= AVFMT_NOFILE;

  // Generate header
  std::vector<uint8_t> header_buffer;
  std::size_t header_size;
  uint8_t *header_raw_buffer;
  // define meta data
  av_dict_set(&format_context_->metadata, "author", "ROS web_video_server", 0);
  av_dict_set(&format_context_->metadata, "title", topic_.c_str(), 0);

  if (avio_open_dyn_buf(&format_context_->pb) >= 0)
  {
    if (avformat_write_header(format_context_, NULL) < 0)
    {
      async_web_server_cpp::HttpReply::stock_reply(async_web_server_cpp::HttpReply::internal_server_error)(request_,
                                                                                                           connection_,
                                                                                                           NULL, NULL);
      throw std::runtime_error("Error writing format header");
    }
    header_size = avio_close_dyn_buf(format_context_->pb, &header_raw_buffer);

    // copy header buffer to vector
    header_buffer.resize(header_size);
    memcpy(&header_buffer[0], header_raw_buffer, header_size);

    av_free(header_raw_buffer);
  }

  // Send response headers
  async_web_server_cpp::HttpReply::builder(async_web_server_cpp::HttpReply::ok).header("Connection", "close").header(
      "Server", "web_video_server").header("Cache-Control",
                                           "no-cache, no-store, must-revalidate, pre-check=0, post-check=0, max-age=0").header(
      "Pragma", "no-cache").header("Expires", "0").header("Max-Age", "0").header("Trailer", "Expires").header(
      "Content-type", content_type_).header("Access-Control-Allow-Origin", "*").write(connection_);

  // Send video stream header
  connection_->write_and_clear(header_buffer);
}
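
The header above is rendered into an in-memory buffer via avio_open_dyn_buf()
and pushed to the HTTP client by hand. A plausible companion step (not part of
this example; sendPacket and the connection type are assumptions) would mux
each encoded packet the same way:

static void sendPacket(AVFormatContext *fc, AVPacket *pkt,
                       async_web_server_cpp::HttpConnectionPtr connection)
{
  if (avio_open_dyn_buf(&fc->pb) < 0)
    return;
  av_write_frame(fc, pkt);             // mux into the dynamic buffer
  uint8_t *buf = NULL;
  int size = avio_close_dyn_buf(fc->pb, &buf);
  std::vector<uint8_t> bytes(buf, buf + size);
  av_free(buf);
  connection->write_and_clear(bytes);  // stream the muxed bytes to the client
}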
Ejemplo n.º 18
static int vo_dx_init(void)
{
    DDSURFACEDESC2 ddsd;

    dw = g_image_width = dlpctxp->pwidth;
    dh = g_image_height = dlpctxp->pheight;
    g_screen_width = GetSystemMetrics(SM_CXSCREEN);
    g_screen_height = GetSystemMetrics(SM_CYSCREEN);
    dx = (g_screen_width - dw) / 2;
    dy = (g_screen_height - dh) / 2;

    if (FALSE == DxCreateWindow()) {
        av_log(NULL, AV_LOG_ERROR, "DxCreateWindow step\n");
        return -1;
    }

    if (FALSE == DxInitDirectDraw()) {
        av_log(NULL, AV_LOG_ERROR, "DxInitDirectDraw step\n");
        return -1;
    }

    if (FALSE == DxCreatePrimarySurface()) {
        av_log(NULL, AV_LOG_ERROR, "DxCreatePrimarySurface step\n");
        return -1;
    }

    if (FALSE == DxCreateOverlay()) {
        av_log(NULL, AV_LOG_ERROR, "DxCreateOverlay step\n");
        if (FALSE == DxCreateOffbuffer()) {
            av_log(NULL, AV_LOG_ERROR, "DxCreateOffbuffer step\n");
            return -1;
        }
    }

    if (FALSE == DxGetCaps()) {
        av_log(NULL, AV_LOG_ERROR, "DxGetCaps step\n");
        return -1;
    }
    if (FALSE == DxManageOverlay()) {
        av_log(NULL, AV_LOG_ERROR, "DxManageOverlay step\n");
        return -1;
    }

    memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
    ddsd.dwSize = sizeof(DDSURFACEDESC2);
    if (IDirectDrawSurface_Lock(g_lpddsBack, NULL, &ddsd,
                                DDLOCK_NOSYSLOCK | DDLOCK_WAIT, NULL) != DD_OK) {
        av_log(NULL, AV_LOG_ERROR, "Lock Back Buffer step\n");
        return -1;
    }
    g_dstride = ddsd.lPitch;
    g_image = ddsd.lpSurface;
    av_log(NULL, AV_LOG_INFO, "vo dx init ok\n");

    /* -------- create picture ----------------- */
    my_pic = av_mallocz(sizeof(AVPicture));
    if (avpicture_alloc(my_pic, g_image_format, g_image_width,
                        g_image_height) < 0) {
        av_log(NULL, AV_LOG_ERROR, "avpicture alloc error\n");
        return -1;
    }

    g_inited = 1;
    return 0;
}
Ejemplo n.º 19
void *vs_open (int v_width, int v_height)
{
	Ctx *ctx = malloc(sizeof(Ctx));
	if (!ctx) {
		fprintf(stderr, "%s: malloc err\n", __func__);
		return 0;
	}
	ctx->v_width = v_width;
	ctx->v_height = v_height;

	// window
	ctx->display = XOpenDisplay(0);
	if (!ctx->display) {
		fprintf(stderr, "%s: XOpenDisplay err\n", __func__);
		exit(-1);
	}
	ctx->window = XCreateSimpleWindow(ctx->display, RootWindow(ctx->display, 0),
			100, 100, v_width, v_height, 0, BlackPixel(ctx->display, 0),
			WhitePixel(ctx->display, 0));
	ctx->screen = 0;
	ctx->gc = XCreateGC(ctx->display, ctx->window, 0, 0);
	
	XMapWindow(ctx->display, ctx->window);

	// current screen pix fmt
	Window root;
	unsigned int cx, cy, border, depth;
	int x, y;
	XGetGeometry(ctx->display, ctx->window, &root, &x, &y, &cx, &cy, &border, &depth);

	// visual info
	XMatchVisualInfo(ctx->display, ctx->screen, depth, DirectColor, &ctx->vinfo);

	// image
	ctx->image = XShmCreateImage(ctx->display, ctx->vinfo.visual, depth, ZPixmap, 0,
			&ctx->segment, cx, cy);
	if (!ctx->image) {
		fprintf(stderr, "%s: can't XShmCreateImage !\n", __func__);
		exit(-1);
	}
	ctx->segment.shmid = shmget(IPC_PRIVATE,
			ctx->image->bytes_per_line * ctx->image->height, 
			IPC_CREAT | 0777);
	if (ctx->segment.shmid < 0) {
		fprintf(stderr, "%s: shmget err\n", __func__);
		exit(-1);
	}

	ctx->segment.shmaddr = (char*)shmat(ctx->segment.shmid, 0, 0);
	if (ctx->segment.shmaddr == (char*)-1) {
		fprintf(stderr, "%s: shmat err\n", __func__);
		exit(-1);
	}

	ctx->image->data = ctx->segment.shmaddr;
	ctx->segment.readOnly = 0;
	XShmAttach(ctx->display, &ctx->segment);

	PixelFormat target_pix_fmt = PIX_FMT_NONE;
	switch (ctx->image->bits_per_pixel) {
		case 32:
			target_pix_fmt = PIX_FMT_RGB32;
			break;
		case 24:
			target_pix_fmt = PIX_FMT_RGB24;
			break;
		default:
			break;
	}

	if (target_pix_fmt == PIX_FMT_NONE) {
		fprintf(stderr, "%s: screen depth format err\n", __func__);
		free(ctx);
		return 0;
	}

	// sws
	ctx->target_pixfmt = target_pix_fmt;
	ctx->curr_width = cx;
	ctx->curr_height = cy;
	ctx->sws = sws_getContext(v_width, v_height, PIX_FMT_YUV420P,
			cx, cy, target_pix_fmt,
			SWS_FAST_BILINEAR, 0, 0, 0);

	avpicture_alloc(&ctx->pic_target, target_pix_fmt, cx, cy);

	XFlush(ctx->display);

	return ctx;
}
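
A minimal sketch of the matching draw path (hypothetical vs_draw; assumes the
decoder hands over a YUV420P AVPicture of v_width x v_height, and that
pic_target's stride matches the XImage's bytes_per_line):

void vs_draw (void *vctx, AVPicture *pic)
{
	Ctx *ctx = (Ctx*)vctx;

	// color-convert and scale into the RGB target picture
	sws_scale(ctx->sws, (const uint8_t * const *) pic->data, pic->linesize,
			0, ctx->v_height, ctx->pic_target.data, ctx->pic_target.linesize);

	// copy into the shared-memory XImage and push it to the window
	memcpy(ctx->image->data, ctx->pic_target.data[0],
			ctx->image->bytes_per_line * ctx->curr_height);
	XShmPutImage(ctx->display, ctx->window, ctx->gc, ctx->image,
			0, 0, 0, 0, ctx->curr_width, ctx->curr_height, False);
	XSync(ctx->display, False);
}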
Ejemplo n.º 20
//static
void *VideoPlayer::doProcessVideo(void *args)
{
    AVPicture *picture;
    AVPacket packet;
    int got_frame = 0;
    
    VideoPlayer *player = static_cast<VideoPlayer*>(args);
    
    while(! player->mStop) {
        
        if (player->mSeek) {
            player->doSeek();
        }
        
        if (player->mAccurateSeek) {
            player->doAccurateSeek();
        }

        if (av_read_frame(player->mFormatCtx, &packet) < 0) {
            vLOGE("END OF FILE.\n");
            player->mVideoEndCallback(player, "stop");
            av_free_packet(&packet);
            break;
        }
        
        if (packet.stream_index == player->mVideoStreamIndex) {
            
            int len = avcodec_decode_video2(player->mCodecCtx, player->mFrame, &got_frame, &packet);
            
            if (len < 0 || got_frame == 0) {
                vLOGE("Did not get a frame.\n");
                av_free_packet(&packet);
                continue;
            }
            
            picture = new (std::nothrow) AVPicture;
            if (! picture) {
                vLOGE("new AVPicture failed.");
                av_free_packet(&packet);
                continue;
            }

            avpicture_alloc(picture, PIX_FMT_RGB24, player->mWidth, player->mHeight);

            sws_scale (player->mImageConvertCtx,
                       player->mFrame->data,
                       player->mFrame->linesize,
                       0, player->mCodecCtx->height,
                       picture->data,
                       picture->linesize);
            
            player->mPictureRingBuffer.enqueue(picture);
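            // The ring buffer's consumer presumably takes ownership and must
            // avpicture_free() and delete the AVPicture when done.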
            
        } else {
            vLOGE("Not video stream packet, ignore it.\n");
        }
        
        av_free_packet(&packet);
        usleep(10);
        
    }
    
    return nullptr;
}
Ejemplo n.º 21
//--------------------------------------------------------------------
// If a 24 bit video format is found it is the preferred one; if not, the
// first format returned by unicap is selected.
//
// Then it tries to set the desired width and height; if that fails, it tries
// to find the nearest supported size or to set the default width and height.
//
// On V4L devices the 24 bit format is always BGR, so it needs conversion.
// On some V4L devices using a non-default width/height the driver reports
// BGR but returns RGB.
// ffmpeg color conversion
void ofUCUtils::set_format(int w, int h) {

	unicap_format_t formats[MAX_FORMATS];
	int format_count;
	unicap_status_t status = STATUS_SUCCESS;
	int rgb24 = -1;

	if(verbose) printf("Unicap : Available formats for this device:\n");
	for (format_count = 0; SUCCESS (status) && (format_count < MAX_FORMATS); format_count++) {
		status = unicap_enumerate_formats (handle, NULL, &formats[format_count], format_count);
		if (SUCCESS (status)) {
			if (formats[format_count].bpp == 24) {
				rgb24 = format_count;
			}
			if(verbose) printf (
					"Unicap : %d: %s, min size: %dx%d, max size:%dx%d, default size: %dx%d\n",
					format_count, formats[format_count].identifier,
					formats[format_count].min_size.width,
					formats[format_count].min_size.height,
					formats[format_count].max_size.width,
					formats[format_count].max_size.height,
					formats[format_count].size.width,
					formats[format_count].size.height);
		}
	}

	if (format_count > 0) {
		int selected_format = 0;
		if (rgb24 != -1)
			selected_format = rgb24;
		format = formats[selected_format];

		bool sizeFound   = true;
		bool exactMatch  = false;
		if(w == format.size.width && h == format.size.height){
			exactMatch = true;
		}else if(w <= format.min_size.width && h <= format.min_size.height){
			format.size.width  = format.min_size.width;
			format.size.height = format.min_size.height;
		}else if(w >= format.max_size.width && h >= format.max_size.height){
			format.size.width  = format.max_size.width;
			format.size.height = format.max_size.height;
		}else{
			sizeFound=false;
		}

		if(sizeFound){
			if(verbose && !exactMatch)
				printf ("Unicap : Can't set video format %s, with size %dx%d\n",
								format.identifier, w, h);

			if ( !SUCCESS ( unicap_set_format (handle, &format) ) ) {
				printf ("Unicap : Failed to set alternative video format!\n");
				return;
			}
		}else{
			format.size.width  = w;
			format.size.height = h;

			//Try selected size
			if (!SUCCESS (unicap_set_format (handle, &format))) {
				printf ("Unicap : Can't set video format %s, with size %dx%d\n",
						format.identifier, w, h);


				// If selected size doesn't work try to find a supported one
				unicap_format_t format_spec;
				unicap_void_format(&format_spec);

				int nearW				= 9999999;
				int nearH				= 9999999;


				//Try with unicap reported sizes
				if(format.size_count > 0){
					if(verbose)printf ("Unicap : Available sizes: %d\n",format.size_count);

					for(int i = 0; i < format.size_count; i++){
						if(verbose) printf ("%d,%d\n",format.sizes[i].width,format.sizes[i].height);
						if(abs(w-format.sizes[i].width)<abs(w-nearW)){
							nearW = format.sizes[i].width;
							nearH = format.sizes[i].height;
						}
					}
					format.size.width  = nearW;
		            format.size.height = nearH;

		        //Try with stepping
				}else if(format.h_stepping > 1 || format.v_stepping > 1){
					//Walk the size range supported by the format using its h/v stepping
		            int stepX = format.h_stepping;
		            int stepY = format.v_stepping;
		            for(int x = format.min_size.x; x <= format.max_size.x; x+= stepX)
		            {
		            	if( abs(w-x) < abs(w-nearW) ){
		            		nearW = x;
		            	}
		            }

		            for(int y = format.min_size.y; y <= format.max_size.y; y+= stepY)
		            {
		            	if( abs(h-y) < abs(h-nearH) ){
		            		nearH = y;
		            	}
		            }
		            format.size.width  = nearW;
		            format.size.height = nearH;
				}

				//Try to set the size we found
				sizeFound = SUCCESS ( unicap_set_format (handle, &format) );

				//If none of the above work, try default size
				if(!sizeFound){
	       			if ( !SUCCESS( unicap_enumerate_formats( handle, &format_spec, &format, selected_format ) ) ) {
						printf("Unicap : Failed to get alternative video format\n");
						return;
					}

					if ( !SUCCESS ( unicap_set_format (handle, &format) ) ) {
						printf ("Unicap : Failed to set alternative video format!\n");
						return;
					}
				}
			}

		}
		if(verbose) printf("Unicap : Selected format: %s, with size %dx%d\n", format.identifier,
				format.size.width, format.size.height);

		src_pix_fmt=fourcc_to_pix_fmt(format.fourcc);
		if( src_pix_fmt==-1){
			printf("Unicap : Format not supported\n");
			return;
		}

		if(src_pix_fmt!=PIX_FMT_RGB24){
			src=new AVPicture;
			avpicture_alloc(src,src_pix_fmt,format.size.width,format.size.height);
			dst=new AVPicture;
			avpicture_alloc(dst,PIX_FMT_RGB24,format.size.width,format.size.height);
			printf("Converting to RGB24\n");
		}
	}
}
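
A sketch of the per-frame conversion the src/dst pictures above enable
(hypothetical helper; `scaler` stands for a SwsContext created once for
src_pix_fmt -> PIX_FMT_RGB24 at format.size dimensions):

void ofUCUtils::new_frame(unicap_data_buffer_t * buffer) {
	if(src_pix_fmt != PIX_FMT_RGB24) {
		// wrap the raw capture buffer without copying it
		avpicture_fill(src, buffer->data, src_pix_fmt,
				format.size.width, format.size.height);
		sws_scale(scaler, src->data, src->linesize, 0,
				format.size.height, dst->data, dst->linesize);
		// dst->data[0] now holds packed RGB24 pixels
	}
}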
Ejemplo n.º 22
int CTool::ConvertFormat(/*[in]*/ const AVPicture &inFrame,
                         /*[in]*/ int nInWidth,
                         /*[in]*/ int nInHeight,
                         /*[in]*/ AVPixelFormat inPixelFormat,
                         /*[out]*/AVPicture &outFrame,
                         /*[in]*/ int nOutWidth,
                         /*[in]*/ int nOutHeight,
                         /*[in]*/ AVPixelFormat outPixelFormat)
{
    int nRet = 0;
    struct SwsContext* pSwsCtx = NULL;
    
    // Allocate the output picture buffer
    nRet = avpicture_alloc(&outFrame, outPixelFormat, nOutWidth, nOutHeight);
    if(nRet)
    {
        LOG_MODEL_ERROR("Tool", "avpicture_alloc fail:%x", nRet);
        return nRet;
    }
    
    if(inPixelFormat == outPixelFormat
            && nInWidth == nOutWidth
            && nInHeight == nOutHeight)
    {
        av_picture_copy(&outFrame, &inFrame, inPixelFormat,
                        nInWidth, nInHeight);
        return 0;
    }
    
    // Set up the image conversion (scaling) context
    pSwsCtx = sws_getCachedContext (NULL,
                                    nInWidth,                // source width
                                    nInHeight,               // source height
                                    inPixelFormat,           // source format
                                    nOutWidth,               // destination width
                                    nOutHeight,              // destination height
                                    outPixelFormat,          // destination format
                                    SWS_FAST_BILINEAR,       // scaling algorithm
                                    NULL, NULL, NULL);
    if(NULL == pSwsCtx)
    {
        LOG_MODEL_ERROR("Tool", "sws_getCachedContext failed");
        avpicture_free(&outFrame);
        return -3;
    }
    
    // Perform the conversion
    nRet = sws_scale(pSwsCtx,
                     inFrame.data, inFrame.linesize,
                     0, nInHeight,
                     outFrame.data, outFrame.linesize);
    if(nRet < 0)
    {
        LOG_MODEL_ERROR("Tool", "sws_scale fail:%x", nRet);
        avpicture_free(&outFrame);
    }
    else
    {
        nRet = 0;
    }
    
    sws_freeContext(pSwsCtx);
    return nRet;
}
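
A hypothetical call site for the helper above, converting a decoded YUV420P
picture to RGB24 at the same size (static-member syntax assumed; the output
must be released with avpicture_free):

    AVPicture out;
    if(0 == CTool::ConvertFormat(in, nWidth, nHeight, AV_PIX_FMT_YUV420P,
                                 out, nWidth, nHeight, AV_PIX_FMT_RGB24))
    {
        // ... use out.data / out.linesize ...
        avpicture_free(&out);
    }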
Ejemplo n.º 23
File: raw.c Project: AmesianX/pilot
int prepare(VideoOut_sV *video, const char *filename, const char *vcodec, const int width, const int height, const int bitrate,
             const unsigned int numerator, const unsigned int denominator)
{

    /* must be called before using avcodec lib */
    avcodec_init();

    video->frameNr = 0;
    video->errorMessage = NULL;
    video->filename = malloc(strlen(filename)+1);
    strcpy(video->filename, filename);

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    /* allocate the output media context */
    avformat_alloc_output_context2(&video->fc, NULL, NULL, filename);
    if (!video->fc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&video->fc, NULL, "mpeg", filename);
    }
    if (!video->fc) {
        const char *s = "Could allocate the output context, even MPEG is not available.\n";
        fprintf(stderr, "%s", s);
        setErrorMessage(video, s);
        return 2;
    }
    video->format = video->fc->oformat;
    printf("Using format %s.\n", video->format->name);

    /* Use the given vcodec if it is not NULL */
    if (vcodec != NULL) {
        AVCodec *codec = avcodec_find_encoder_by_name(vcodec);
        if (codec == NULL) {
            char s[strlen(vcodec)+50];
            sprintf(s, "No codec available for %s.\n", vcodec);
            fprintf(stderr, "%s", s);
            setErrorMessage(video, s);
            return 2;
        }
        printf("Found codec: %s\n", codec->long_name);
        video->format->video_codec = codec->id;
    }

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video->streamV = NULL;
    if (video->format->video_codec != CODEC_ID_NONE) {

        video->streamV = av_new_stream(video->fc, 0);
        if (!video->streamV) {
            const char *s = "Could not allocate the video stream.\n";
            fprintf(stderr, "%s", s);
            setErrorMessage(video, s);
            return 2;
        }

        AVCodecContext *cc = video->streamV->codec;

        cc->codec_id = video->format->video_codec;
        cc->codec_type = AVMEDIA_TYPE_VIDEO;

        cc->bit_rate = bitrate;

        /* resolution must be a multiple of two */
        cc->width = width;
        cc->height = height;


        /* time base: this is the fundamental unit of time (in seconds) in terms
           of which frame timestamps are represented. for fixed-fps content,
           timebase should be 1/framerate and timestamp increments should be
           identically 1. */
        cc->time_base= (AVRational){numerator, denominator};
        cc->gop_size = 12; /* emit one intra frame every twelve frames */


        cc->pix_fmt = PIX_FMT_YUV420P;
        if (cc->codec_id == CODEC_ID_MPEG2VIDEO || cc->codec_id == CODEC_ID_MPEG4) {
            /* just for testing, we also add B frames */
            cc->max_b_frames = 2;
        }
        if (cc->codec_id == CODEC_ID_MPEG1VIDEO){
            /* Needed to avoid using macroblocks in which some coeffs overflow.
               This does not happen with normal video, it just happens here as
               the motion of the chroma plane does not match the luma plane. */
            cc->mb_decision=2;
        }
        // some formats want stream headers to be separate
        if(video->fc->oformat->flags & AVFMT_GLOBALHEADER) {
            cc->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }

        video->rgbConversionContext = sws_getContext(
                    cc->width, cc->height,
                    PIX_FMT_BGRA,
                    cc->width, cc->height,
                    cc->pix_fmt,
                    SWS_BICUBIC, NULL, NULL, NULL);

        // One line size for each plane. RGB consists of one plane only.
        // (YUV420P consists of 3: Y, Cb, and Cr.)
        video->rgbLinesize[0] = cc->width*4;
        video->rgbLinesize[1] = 0;
        video->rgbLinesize[2] = 0;
        video->rgbLinesize[3] = 0;

        if (video->rgbConversionContext == NULL) {
            char s[200];
            sprintf(s, "Cannot initialize the RGB conversion context. Incorrect size (%dx%d)?\n", cc->width, cc->height);
            fprintf(stderr, "%s", s);
            setErrorMessage(video, s);
            return 2;
        }


        printf("Settings: %dx%d, %d bits/s (tolerance: %d), %.1f fps\n", cc->width, cc->height,
               cc->bit_rate, cc->bit_rate_tolerance, ((float)cc->time_base.den)/cc->time_base.num);
        fflush(stdout);
    } else {
        const char *s = "No codec ID given.\n";
        fprintf(stderr, "%s", s);
        setErrorMessage(video, s);
        return 2;
    }

    av_dump_format(video->fc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video->streamV) {
        int ret = open_video(video);
        if (ret != 0) {
            return ret;
        }
    } else {
        const char *s = "Could not open video stream.\n";
        fprintf(stderr, "%s", s);
        setErrorMessage(video, s);
        return 2;
    }



    /* open the output file, if needed */
    if (!(video->format->flags & AVFMT_NOFILE)) {
        if (avio_open(&video->fc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "could not open %s\n", video->filename);
            char *msg = "Could not open file: ";
            char *msgAll = malloc(sizeof(char) * (strlen(filename) + strlen(msg) + 1));
            strcpy(msgAll, msg);
            strcat(msgAll, filename);
            fprintf(stderr, "%s", msgAll);
            setErrorMessage(video, msgAll);
            free(msgAll);
            return 5;
        }
    }

    /* write the stream header, if any */
    avformat_write_header(video->fc, NULL);


    /* alloc image and output buffer */
    video->outbufSizeV = avpicture_get_size(video->streamV->codec->pix_fmt, width, height);
    video->outbufV = av_malloc(video->outbufSizeV);

    video->picture = avcodec_alloc_frame();
    if (!video->picture) {
        const char *s = "Could not allocate AVPicture.\n";
        fprintf(stderr, "%s", s);
        setErrorMessage(video, s);
        return 2;
    }
    avpicture_alloc((AVPicture*)video->picture, video->streamV->codec->pix_fmt,
                    video->streamV->codec->width, video->streamV->codec->height);

    return 0;
}
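
A sketch of the matching per-frame step under the same pre-AVPacket API
assumptions (hypothetical encode_frame; the caller supplies packed BGRA of
the configured width/height):

int encode_frame(VideoOut_sV *video, const unsigned char *bgra)
{
    AVCodecContext *cc = video->streamV->codec;
    const uint8_t *src[4] = { bgra, NULL, NULL, NULL };

    /* BGRA -> YUV420P into the preallocated picture */
    sws_scale(video->rgbConversionContext, (const uint8_t * const *)src,
              video->rgbLinesize, 0, cc->height,
              video->picture->data, video->picture->linesize);

    int size = avcodec_encode_video(cc, video->outbufV, video->outbufSizeV,
                                    video->picture);
    if (size < 0)
        return 1; /* encode error */
    /* a real implementation would now wrap outbufV in an AVPacket and
       av_interleaved_write_frame() it; omitted here */
    return 0;
}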
Ejemplo n.º 24
static void
libav_deliver_frame(video_decoder_t *vd,
                    media_pipe_t *mp, media_queue_t *mq,
                    AVCodecContext *ctx, AVFrame *frame,
                    const media_buf_meta_t *mbm, int decode_time,
                    const media_codec_t *mc)
{
  frame_info_t fi;

  /* Compute aspect ratio */
  switch(mbm->mbm_aspect_override) {
  case 0:

    fi.fi_dar_num = frame->width;
    fi.fi_dar_den = frame->height;

    if(frame->sample_aspect_ratio.num) {
      fi.fi_dar_num *= frame->sample_aspect_ratio.num;
      fi.fi_dar_den *= frame->sample_aspect_ratio.den;
    } else if(mc->sar_num) {
      fi.fi_dar_num *= mc->sar_num;
      fi.fi_dar_den *= mc->sar_den;
    }

    break;
  case 1:
    fi.fi_dar_num = 4;
    fi.fi_dar_den = 3;
    break;
  case 2:
    fi.fi_dar_num = 16;
    fi.fi_dar_den = 9;
    break;
  default:
    // Unknown override value; fall back to the coded frame dimensions
    fi.fi_dar_num = frame->width;
    fi.fi_dar_den = frame->height;
    break;
  }

  int64_t pts = video_decoder_infer_pts(mbm, vd,
					frame->pict_type == AV_PICTURE_TYPE_B);

  int duration = mbm->mbm_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
	duration = t;

    }
  }
  
  // repeat_pict counts extra display time in half-frame units (e.g. soft
  // telecine), hence the duration / 2 term.
  duration += frame->repeat_pict * duration / 2;

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;
  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }
#if 0
  static int64_t lastpts = AV_NOPTS_VALUE;
  if(lastpts != AV_NOPTS_VALUE) {
    printf("DEC: %20"PRId64" : %-20"PRId64" %d %"PRId64" %6d %d\n", pts, pts - lastpts, mbm->mbm_drive_clock,
           mbm->mbm_delta, duration, mbm->mbm_sequence);
    if(pts - lastpts > 1000000) {
      abort();
    }
  }
  lastpts = pts;
#endif

  vd->vd_interlaced |=
    frame->interlaced_frame && !mbm->mbm_disable_deinterlacer;

  fi.fi_width = frame->width;
  fi.fi_height = frame->height;
  fi.fi_pts = pts;
  fi.fi_epoch = mbm->mbm_epoch;
  fi.fi_delta = mbm->mbm_delta;
  fi.fi_duration = duration;
  fi.fi_drive_clock = mbm->mbm_drive_clock;

  fi.fi_interlaced = !!vd->vd_interlaced;
  fi.fi_tff = !!frame->top_field_first;
  fi.fi_prescaled = 0;

  fi.fi_color_space = 
    ctx->colorspace < ARRAYSIZE(libav_colorspace_tbl) ? 
    libav_colorspace_tbl[ctx->colorspace] : 0;

  fi.fi_type = 'LAVC';

  // Check if we should skip directly to convert code
  if(vd->vd_convert_width  != frame->width ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {

    // Nope, go ahead and deliver frame as-is

    fi.fi_data[0] = frame->data[0];
    fi.fi_data[1] = frame->data[1];
    fi.fi_data[2] = frame->data[2];

    fi.fi_pitch[0] = frame->linesize[0];
    fi.fi_pitch[1] = frame->linesize[1];
    fi.fi_pitch[2] = frame->linesize[2];

    fi.fi_pix_fmt = frame->format;
    fi.fi_avframe = frame;

    int r = video_deliver_frame(vd, &fi);

    /* return value
     * 0  = OK
     * 1  = Need convert to YUV420P
     * -1 = Fail
     */

    if(r != 1)
      return;
  }

  // Need to convert frame

  vd->vd_sws =
    sws_getCachedContext(vd->vd_sws,
                         frame->width, frame->height, frame->format,
                         frame->width, frame->height, PIX_FMT_YUV420P,
                         0, NULL, NULL, NULL);

  if(vd->vd_sws == NULL) {
    TRACE(TRACE_ERROR, "Video", "Unable to convert from %s to %s",
	  av_get_pix_fmt_name(frame->format),
	  av_get_pix_fmt_name(PIX_FMT_YUV420P));
    return;
  }

  if(vd->vd_convert_width  != frame->width  ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {
    avpicture_free(&vd->vd_convert);

    vd->vd_convert_width  = frame->width;
    vd->vd_convert_height = frame->height;
    vd->vd_convert_pixfmt = frame->format;

    avpicture_alloc(&vd->vd_convert, PIX_FMT_YUV420P, frame->width,
                    frame->height);

    TRACE(TRACE_DEBUG, "Video", "Converting from %s to %s",
	  av_get_pix_fmt_name(frame->format),
	  av_get_pix_fmt_name(PIX_FMT_YUV420P));
  }

  sws_scale(vd->vd_sws, (void *)frame->data, frame->linesize, 0,
            frame->height, vd->vd_convert.data, vd->vd_convert.linesize);

  fi.fi_data[0] = vd->vd_convert.data[0];
  fi.fi_data[1] = vd->vd_convert.data[1];
  fi.fi_data[2] = vd->vd_convert.data[2];

  fi.fi_pitch[0] = vd->vd_convert.linesize[0];
  fi.fi_pitch[1] = vd->vd_convert.linesize[1];
  fi.fi_pitch[2] = vd->vd_convert.linesize[2];

  fi.fi_type = 'LAVC';
  fi.fi_pix_fmt = PIX_FMT_YUV420P;
  fi.fi_avframe = NULL;
  video_deliver_frame(vd, &fi);
}
Ejemplo n.º 25
bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height,
                          unsigned int pitch, unsigned int format)
{
  if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8)
    return false;

  if (!m_pFrame || !m_pFrame->data[0])
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVFrame member not allocated");
    return false;
  }

  AVPicture* pictureRGB = static_cast<AVPicture*>(av_mallocz(sizeof(AVPicture)));
  if (!pictureRGB)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVPicture could not be allocated");
    return false;
  }

  int size = avpicture_fill(pictureRGB, NULL, AV_PIX_FMT_RGB32, width, height);
  if (size < 0)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVPicture member with %i x %i pixels", width, height);
    av_free(pictureRGB);
    return false;
  }

  bool needsCopy = false;
  int pixelsSize = pitch * height;
  if (size == pixelsSize && (int) pitch == pictureRGB->linesize[0])
  {
    // We can use the pixels buffer directly
    pictureRGB->data[0] = pixels;
  }
  else
  {
    // We need an extra buffer and copy it manually afterwards
    if (avpicture_alloc(pictureRGB, AV_PIX_FMT_RGB32, width, height) < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate temp buffer of size %i bytes", size);
      av_free(pictureRGB);
      return false;
    }
    needsCopy = true;
  }

  // JPEG formats in particular are full range, so we need to take care of
  // that here. Input formats like RGBA are handled correctly automatically.
  AVColorRange range = av_frame_get_color_range(m_pFrame);
  AVPixelFormat pixFormat = ConvertFormats(m_pFrame);

  // assumes square maximums, e.g. 2048x2048
  float ratio = m_width / (float) m_height;
  unsigned int nHeight = m_originalHeight;
  unsigned int nWidth = m_originalWidth;
  if (nHeight > height)
  {
    nHeight = height;
    nWidth = (unsigned int) (nHeight * ratio + 0.5f);
  }
  if (nWidth > width)
  {
    nWidth = width;
    nHeight = (unsigned int) (nWidth / ratio + 0.5f);
  }

  struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat,
    nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
  if (!context)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not create sws context");
    if (needsCopy)
      avpicture_free(pictureRGB);
    av_free(pictureRGB);
    return false;
  }

  if (range == AVCOL_RANGE_JPEG)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;
    sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation);
    srcRange = 1;
    sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation);
  }

  sws_scale(context, m_pFrame->data, m_pFrame->linesize, 0, m_originalHeight,
    pictureRGB->data, pictureRGB->linesize);
  sws_freeContext(context);

  if (needsCopy)
  {
    int minPitch = std::min((int)pitch, pictureRGB->linesize[0]);
    if (minPitch < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "negative pitch or height");
      avpicture_free(pictureRGB);
      av_free(pictureRGB);
      return false;
    }
    const unsigned char *src = pictureRGB->data[0];
    unsigned char* dst = pixels;

    for (unsigned int y = 0; y < nHeight; y++)
    {
      memcpy(dst, src, minPitch);
      src += pictureRGB->linesize[0];
      dst += pitch;
    }

    avpicture_free(pictureRGB);
  }
  pictureRGB->data[0] = nullptr;
  avpicture_free(pictureRGB);
  av_free(pictureRGB);

  // update width and height original dimensions are kept
  m_height = nHeight;
  m_width = nWidth;

  return true;
}
Ejemplo n.º 26
bool FFMpegVideoEncoder::openVideoEncoder(const VideoEncoderParam& param)
{
	encodeParam = param;
	AVCodecID codecID;
	switch(param.codec)
	{
		case CodecType::H264:
			codecID = CODEC_ID_H264;
			break;
		case CodecType::MPEG4:
			codecID = CODEC_ID_MPEG4;
			break;
		default:
			codecID = CODEC_ID_MPEG4;
			break;

	}
	codec = avcodec_find_encoder(codecID);
	if (!codec)
	{
		std::cout << "Video encoder not found" << std::endl;
		return false;
	}
	codecCtx = avcodec_alloc_context3(codec);
	codecCtx->gop_size = param.gop;
	codecCtx->max_b_frames = param.bframe;
	codecCtx->width = param.width;
	codecCtx->height = param.height;
	codecCtx->time_base.num = param.fpsDen;
	codecCtx->time_base.den = param.fpsNum;
	codecCtx->pix_fmt = param.colorSpace;
	codecCtx->bit_rate = param.bitrate * 1000;
	codecCtx->bit_rate_tolerance = codecCtx->bit_rate + codecCtx->bit_rate /100;
	codecCtx->delay = 0;
	//codecCtx->coder_type = 0;
	//codecCtx->me_cmp |= 1;
	//codecCtx->me_method = ME_HEX;
	//codecCtx->me_subpel_quality = 0;
	//codecCtx->me_range = 16;
	//codecCtx->scenechange_threshold = 40;
	//codecCtx->i_quant_factor = 0.71;
	//codecCtx->b_frame_strategy = 1;
	//codecCtx->qcompress = 0.5;
	//codecCtx->qmin = 2;
	//codecCtx->qmax = 31;
	//codecCtx->max_qdiff = 4;
	//codecCtx->refs = 3;
	//codecCtx->trellis = 1;
	//codecCtx->chromaoffset = 0;
	//codecCtx->thread_count = 1;

	int error = avcodec_open2(codecCtx, codec, NULL);
	if (error < 0)
	{
		std::cout << "Open video encoder failed" << std::endl;
		return false;
	}
	pic = av_frame_alloc();
	pic->width = param.width;
	pic->height = param.height;
	pic->format = param.colorSpace;
	avpicture_alloc((AVPicture*)pic, AV_PIX_FMT_YUV420P, pic->width, pic->height);
	convertCtx = sws_getContext(param.width, param.height, PIX_FMT_RGB24,
	    		param.width, param.height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	return true;
}
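
A minimal sketch of the matching encode step, assuming the caller feeds
packed RGB24 frames of param.width x param.height (encodeVideoFrame is a
hypothetical method name):

bool FFMpegVideoEncoder::encodeVideoFrame(const uint8_t *rgb, AVPacket *pkt)
{
	const uint8_t *src[1] = { rgb };
	int srcStride[1] = { 3 * codecCtx->width };

	// RGB24 -> YUV420P into the preallocated picture
	sws_scale(convertCtx, src, srcStride, 0, codecCtx->height,
			pic->data, pic->linesize);

	av_init_packet(pkt);
	pkt->data = NULL;	// let the encoder allocate the packet buffer
	pkt->size = 0;
	int gotPacket = 0;
	if (avcodec_encode_video2(codecCtx, pkt, pic, &gotPacket) < 0)
		return false;
	return gotPacket != 0;
}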
Ejemplo n.º 27
static int vo_x11_init ()
{
    int ret;
    unsigned long xswamask, event_mask;
    XSetWindowAttributes xswa;
    XSizeHints hint;
    XVisualInfo xvinfo;
    char *dspname;

    xswamask = CWBackingStore | CWBorderPixel;

    src_pic_fmt = dlpctxp->pixfmt;
    dw = dlpctxp->pwidth;
    dh = dlpctxp->pheight;

    dspname = XDisplayName (NULL);
    av_log (NULL, AV_LOG_INFO, "Open X11 display %s\n", dspname);
    Xdisplay = XOpenDisplay (dspname);
    if (!Xdisplay)
    {
        av_log (NULL, AV_LOG_ERROR, "X11,%d: XOpenDisplay\n", __LINE__);
        return -1;
    }

    Xscreen = DefaultScreen (Xdisplay);
    Xrootwin = RootWindow (Xdisplay, Xscreen);
    screen_width = DisplayWidth (Xdisplay, Xscreen);
    screen_height = DisplayHeight (Xdisplay, Xscreen);

    dx = (screen_width - dw) / 2;
    dy = (screen_height - dh) / 2;

    Xdepth = XDefaultDepth (Xdisplay, 0);
    if (!XMatchVisualInfo (Xdisplay, Xscreen, Xdepth, DirectColor, &xvinfo))
        XMatchVisualInfo (Xdisplay, Xscreen, Xdepth, TrueColor, &xvinfo);

    xswa.background_pixel = 0;
    xswa.border_pixel = 0;
    xswa.backing_store = Always;
    xswa.bit_gravity = StaticGravity;

    Xvowin = XCreateWindow (Xdisplay, Xrootwin, dx, dy, dw, dh, 0, Xdepth, CopyFromParent, CopyFromParent, xswamask, &xswa);

    hint.x = dx;
    hint.y = dy;
    hint.width = dw;
    hint.height = dh;
    hint.flags = PPosition | PSize;
    XSetStandardProperties (Xdisplay, Xvowin, Xtitle, Xtitle, None, NULL, 0, &hint);

    XMapWindow (Xdisplay, Xvowin);
    XClearWindow (Xdisplay, Xvowin);

    event_mask = StructureNotifyMask | KeyPressMask | ExposureMask;
    XSelectInput (Xdisplay, Xvowin, event_mask);
    XSync (Xdisplay, False);

    Xvogc = XCreateGC (Xdisplay, Xrootwin, 0L, NULL);
    XSetForeground (Xdisplay, Xvogc, WhitePixel (Xdisplay, Xscreen));

    Ximg = XCreateImage (Xdisplay, xvinfo.visual, Xdepth, ZPixmap, 0, NULL, dw, dh, 8, 0);

    {
        int i, fontcount;
        char **list;
        list = XListFonts (Xdisplay, "-*-helvetica-*-*-*-*-0-0-*-*-*-0-*-*", 200, &fontcount);
        for (i = 0; i < fontcount; i++)
        {
            if (NULL == Xfont_120)
                Xfont_120 = LoadQueryScalableFont (Xdisplay, Xscreen, list[i], 120);
            if (NULL == Xfont_240)
                Xfont_240 = LoadQueryScalableFont (Xdisplay, Xscreen, list[i], 240);
            if (Xfont_120 && Xfont_240)
                break;
        }
        XFreeFontNames (list);

        if (NULL == Xfont_120 || NULL == Xfont_240)
        {
            av_log (NULL, AV_LOG_ERROR, "XLoadQueryFont: failed\n");
        }

        if (dw < 600)
            Xfont = Xfont_120;
        else
            Xfont = Xfont_240;

        if (Xfont)
            XSetFont (Xdisplay, Xvogc, Xfont->fid);
    }

    switch (Ximg->bits_per_pixel)
    {
    case 32:
        my_pic_fmt = PIX_FMT_RGB32;
        break;
    case 24:
        my_pic_fmt = PIX_FMT_RGB24;
        break;
    case 16:
        my_pic_fmt = PIX_FMT_RGB565;
        break;
    case 15:
        my_pic_fmt = PIX_FMT_RGB555;
        break;
    case 8:
        my_pic_fmt = PIX_FMT_RGB8;
        break;
    default:
        av_log (NULL, AV_LOG_ERROR, "unsupported bits_per_pixel: %d\n",
                Ximg->bits_per_pixel);
        return -1;
    }
    av_log (NULL, AV_LOG_INFO, "bits_per_pixel: %d\n", Ximg->bits_per_pixel);
    /*-----------------------------------------------------------------------------
     *  my picture for rgb
     *-----------------------------------------------------------------------------*/
    my_pic = av_mallocz (sizeof (AVPicture));
    ret = avpicture_alloc (my_pic, my_pic_fmt, dw, dh);
    if (ret < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "avpicture alloc error\n");
        return -1;
    }

    vo_lock_init ();

    return 0;
}
Ejemplo n.º 28
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVPicture pict = { { NULL } };	/* zeroed so the shared cleanup below is always safe */
	AVFrame *frame = NULL;
	struct vidframe vf;
	struct vidsz sz;
	unsigned i;

	if (st->codec) {
		int got_pict, ret;

#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		frame = av_frame_alloc();
#else
		frame = avcodec_alloc_frame();
#endif

#if LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, frame,
					    &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			goto end;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			info("size changed: %d x %d  ---> %d x %d\n",
			     st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;

			if (st->sws) {
				sws_freeContext(st->sws);
				st->sws = NULL;
			}
		}

		if (!st->sws) {
			info("scaling: %d x %d  --->  %d x %d\n",
			     st->sz.w, st->sz.h,
			     st->app_sz.w, st->app_sz.h);

			st->sws = sws_getContext(st->sz.w, st->sz.h,
						 st->ctx->pix_fmt,
						 st->app_sz.w, st->app_sz.h,
						 PIX_FMT_YUV420P,
						 SWS_BICUBIC,
						 NULL, NULL, NULL);
			if (!st->sws)
				goto end;
		}

		ret = avpicture_alloc(&pict, PIX_FMT_YUV420P,
				      st->app_sz.w, st->app_sz.h);
		if (ret < 0)
			goto end;

		ret = sws_scale(st->sws,
				SRCSLICE_CAST frame->data, frame->linesize,
				0, st->sz.h, pict.data, pict.linesize);
		if (ret <= 0)
			goto end;
	}
	else {
		avpicture_fill(&pict, pkt->data, PIX_FMT_YUV420P,
			       st->sz.w, st->sz.h);
	}

	vf.size = st->app_sz;
	vf.fmt  = VID_FMT_YUV420P;
	for (i=0; i<4; i++) {
		vf.data[i]     = pict.data[i];
		vf.linesize[i] = pict.linesize[i];
	}

	st->frameh(&vf, st->arg);

 end:
	if (st->codec)
		avpicture_free(&pict);

	if (frame) {
#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		av_frame_free(&frame);
#else
		av_free(frame);
#endif
	}
}
Ejemplo n.º 29
DSCapture::DSCapture()
{
	int ret;
	av_register_all();
	avdevice_register_all();
	avformat_network_init();
	AVInputFormat * a= av_find_input_format("dshow");	
	// Open the video file
	m_pInputFormatContext = NULL;
	if((ret = avformat_open_input(&m_pInputFormatContext, "video=Integrated Camera", a, NULL)) != 0) {
		// opening the dshow device failed; nothing below can work without it
		return;
	}

	::av_find_stream_info(m_pInputFormatContext);
	m_nInputAudioStreamIndex = ::av_find_best_stream(m_pInputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &m_pInputAudioCodec, 0);
	m_nInputVideoStreamIndex = ::av_find_best_stream(m_pInputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &m_pInputVideoCodec, 0);

	if( m_nInputAudioStreamIndex >= 0 ) {
		m_pInputAudioCodecContext = m_pInputFormatContext->streams[m_nInputAudioStreamIndex]->codec;
		::avcodec_open2(m_pInputAudioCodecContext, m_pInputAudioCodec, NULL);
	}
	if( m_nInputVideoStreamIndex >= 0 ) {
		m_pInputVideoCodecContext = m_pInputFormatContext->streams[m_nInputVideoStreamIndex]->codec;
		::avcodec_open2(m_pInputVideoCodecContext, m_pInputVideoCodec, NULL);
	}

	//output:
	ret = ::avformat_alloc_output_context2(&m_pOutputFormatContext, NULL, "flv", "rtmp://127.0.0.1:8080/live/live1");
	m_pOutputVideoCodec = ::avcodec_find_encoder(AV_CODEC_ID_H264);

	m_pOutputVideoStream = ::av_new_stream(m_pOutputFormatContext, 0);
	m_pOutputVideoCodecContext = m_pOutputVideoStream->codec;
	m_pOutputVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
	m_pOutputVideoCodecContext->width = 320;
	m_pOutputVideoCodecContext->height = 240;
	m_pOutputVideoCodecContext->time_base.num = 1;
	m_pOutputVideoCodecContext->time_base.den = 25;
	m_pOutputVideoCodecContext->gop_size = 12;
	m_pOutputVideoCodecContext->bit_rate = 125000;
	m_pOutputVideoCodecContext->me_range = 16;
	m_pOutputVideoCodecContext->max_qdiff = 4;
	m_pOutputVideoCodecContext->qmax = 15;
	m_pOutputVideoCodecContext->qmin = 10;
	m_pOutputVideoCodecContext->qcompress = 0.6;
	m_pOutputVideoCodecContext->profile = FF_PROFILE_H264_BASELINE;
	if (m_pOutputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
		m_pOutputVideoCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

	ret = ::avcodec_open2(m_pOutputVideoCodecContext, m_pOutputVideoCodec, NULL);
	ret = ::avio_open(&m_pOutputFormatContext->pb, "rtmp://127.0.0.1:8080/live/live1", AVIO_FLAG_WRITE);
	::avformat_write_header(m_pOutputFormatContext, NULL);

	//frame to hold the decoded data:
	m_pFrame = ::avcodec_alloc_frame();
	ret = avpicture_alloc((AVPicture*)m_pFrame, AV_PIX_FMT_YUV420P, 640, 480);

	//init the sws context:
	m_pSwsContext = sws_getContext(640, 480, AV_PIX_FMT_YUYV422,
		640, 480, AV_PIX_FMT_YUV420P, 
		SWS_BICUBIC, NULL, NULL, NULL);
	m_pMidFrame = ::avcodec_alloc_frame();
	ret = avpicture_alloc((AVPicture*)m_pMidFrame, AV_PIX_FMT_YUV420P, 640, 480);

	m_nBasePTS = 0;
}
Ejemplo n.º 30
static buf_t *
screenshot_compress(pixmap_t *pm, int codecid)
{
  AVCodec *codec = avcodec_find_encoder(codecid);
  if(codec == NULL)
    return NULL;

  const int width = pm->pm_width;
  const int height = pm->pm_height;

  AVCodecContext *ctx = avcodec_alloc_context3(codec);
  ctx->pix_fmt = codec->pix_fmts[0];
  ctx->time_base.den = 1;
  ctx->time_base.num = 1;
  ctx->sample_aspect_ratio.num = 1;
  ctx->sample_aspect_ratio.den = 1;
  ctx->width  = width;
  ctx->height = height;

  if(avcodec_open2(ctx, codec, NULL) < 0) {
    TRACE(TRACE_ERROR, "ScreenShot", "Unable to open image encoder");
    av_free(ctx);
    return NULL;
  }

  AVFrame *oframe = av_frame_alloc();

  avpicture_alloc((AVPicture *)oframe, ctx->pix_fmt, width, height);

  const uint8_t *ptr[4] = {NULL};
  int strides[4] = {0};

  if(pm->pm_flags & PIXMAP_VFLIP) {
    ptr[0] = pm->pm_data + pm->pm_linesize * (height - 1);
    strides[0] = -pm->pm_linesize;
  } else {
    ptr[0] = pm->pm_data;
    strides[0] = pm->pm_linesize;
  }
  struct SwsContext *sws;
  sws = sws_getContext(width, height, AV_PIX_FMT_RGB32,
                       width, height, ctx->pix_fmt, SWS_BILINEAR,
                       NULL, NULL, NULL);

  sws_scale(sws, ptr, strides,
            0, height, &oframe->data[0], &oframe->linesize[0]);
  sws_freeContext(sws);

  oframe->pts = AV_NOPTS_VALUE;
  AVPacket out;
  memset(&out, 0, sizeof(AVPacket));
  int got_packet;
  int r = avcodec_encode_video2(ctx, &out, oframe, &got_packet);
  buf_t *b;
  if(r >= 0 && got_packet) {
    b = buf_create_and_adopt(out.size, out.data, &av_free);
  } else {
    assert(out.data == NULL);
    b = NULL;
  }
  av_frame_free(&oframe);
  avcodec_close(ctx);
  av_free(ctx);
  return b;
}
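
A typical call site for the helper above, assuming an RGB32 pixmap and PNG
output (buf_release() assumed from the surrounding buf_t API):

  buf_t *png = screenshot_compress(pm, AV_CODEC_ID_PNG);
  if(png != NULL) {
    // hand the encoded image off (e.g. write it to disk), then release it
    buf_release(png);
  }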